| id (stringlengths 3-8) | content (stringlengths 100-981k) |
|---|---|
1657129
|
from http.server import HTTPServer, BaseHTTPRequestHandler
from optparse import OptionParser
from log import *
print ('''\033[1;31m \n
_ _____
| | / ____|
___ ___| |__ ___| (___ ___ _ ____ _____ _ __
/ _ \/ __| '_ \ / _ \\___ \ / _ \ '__\ \ / / _ \ '__|
| __/ (__| | | | (_) |___) | __/ | \ V / __/ |
\___|\___|_| |_|\___/_____/ \___|_| \_/ \___|_|
''')
print ("\033[1;34m[*]___author___: @noobpk\033[1;37m")
print ("\033[1;34m[*]___version___: 1.0\033[1;37m")
print ("")
ECHO_PORT = 27080
class RequestHandler(BaseHTTPRequestHandler):
def do_FRIDA(self):
request_path = self.path
request_headers = self.headers
content_length = request_headers.get('content-length')
# length = int(content_length[0]) if content_length else 0
length = int(content_length) if content_length else 0
self.send_response(200)
self.end_headers()
self.wfile.write(self.rfile.read(length))
def main():
try:
logger.info('[*] Listening on 127.0.0.1:%d' % ECHO_PORT)
server = HTTPServer(('', ECHO_PORT), RequestHandler)
server.serve_forever()
except KeyboardInterrupt:
logger.info("Stop echoServer!!")
if __name__ == "__main__":
logger.info('[*] Starting echoServer on port %d' % ECHO_PORT)
main()
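# A quick way to exercise the running echo server (hypothetical example; curl accepts
# arbitrary methods via -X, and the request body is echoed back verbatim):
#   curl -X FRIDA -d 'ping' http://127.0.0.1:27080/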
|
1657137
|
import os
import shutil
import pytest
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import upgradeinitramfsgenerator
from leapp.libraries.common.config import architecture
from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked
from leapp.models import (
BootContent,
CopyFile,
DracutModule,
RequiredUpgradeInitramPackages, # deprecated
TargetUserSpaceUpgradeTasks,
UpgradeDracutModule, # deprecated
UpgradeInitramfsTasks,
)
from leapp.utils.deprecation import suppress_deprecation
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PKGS = ['pkg{}'.format(c) for c in 'ABCDEFGHIJ']
FILES = [
CopyFile(src='/host/srcfile{}'.format(i), dst='/cont/dstfile{}'.format(i))
for i in range(5)
]
MODULES = [
('moduleA', None),
('moduleB', None),
('moduleC', '/some/path/moduleC'),
('moduleD', '/some/path/moduleD'),
]
@pytest.fixture
def adjust_cwd():
previous_cwd = os.getcwd()
os.chdir(os.path.join(CUR_DIR, "../"))
yield
os.chdir(previous_cwd)
def gen_TUSU(packages, copy_files=None):
if not isinstance(packages, list):
packages = [packages]
if not copy_files:
copy_files = []
elif not isinstance(copy_files, list):
copy_files = [copy_files]
return TargetUserSpaceUpgradeTasks(install_rpms=packages, copy_files=copy_files)
@suppress_deprecation(RequiredUpgradeInitramPackages)
def gen_RUIP(packages):
if not isinstance(packages, list):
packages = [packages]
return RequiredUpgradeInitramPackages(packages=packages)
def gen_UIT(modules, files):
if not isinstance(modules, list):
modules = [modules]
if not isinstance(files, list):
files = [files]
dracut_modules = [DracutModule(name=i[0], module_path=i[1]) for i in modules]
return UpgradeInitramfsTasks(include_files=files, include_dracut_modules=dracut_modules)
@suppress_deprecation(UpgradeDracutModule)
def gen_UDM_list(data):
if not isinstance(data, list):
data = [data]
return [UpgradeDracutModule(name=i[0], module_path=i[1]) for i in data]
class MockedContext(object):
def __init__(self):
self.called_copy_from = []
self.called_copytree_from = []
self.called_copy_to = []
self.called_call = []
self.content = set()
self.base_dir = "/base/dir"
"""
Content (paths) that should exists regarding the used methods.
It's not 100% same. Just dst paths are copied here. Ignoring differences
between copy to /path/to/filename and /path/to/dirname which in real
world could be different. For our purposes it's ok as it is now.
Point is, that in case of use context.remove_tree(), we are able to
detect whether something what is expected to be present is not missing.
"""
def copy_from(self, src, dst):
self.called_copy_from.append((src, dst))
def copytree_from(self, src, dst):
self.called_copytree_from.append((src, dst))
def copy_to(self, src, dst):
self.called_copy_to.append((src, dst))
self.content.add(dst)
def copytree_to(self, src, dst):
self.called_copy_to.append((src, dst))
self.content.add(dst)
def remove_tree(self, path):
# iterate over a copy, as the set is mutated during the iteration,
# which would otherwise raise a RuntimeError
for item in list(self.content):
# ensure the / is the last character to simulate dirname
dir_fmt_path = path if path[-1] == '/' else path + '/'
if item == path or item.startswith(dir_fmt_path):
# remove the file or everything inside dir (including dir)
self.content.remove(item)
def call(self, *args, **kwargs):
self.called_call.append((args, kwargs))
def full_path(self, path):
return os.path.join(self.base_dir, os.path.abspath(path).lstrip('/'))
class MockedLogger(logger_mocked):
def error(self, *args, **dummy):
self.errmsg.extend(args)
@pytest.mark.parametrize('arch', architecture.ARCH_SUPPORTED)
def test_copy_boot_files(monkeypatch, arch):
kernel = 'vmlinuz-upgrade.{}'.format(arch)
initram = 'initramfs-upgrade.{}.img'.format(arch)
bootc = BootContent(
kernel_path=os.path.join('/boot', kernel),
initram_path=os.path.join('/boot', initram)
)
monkeypatch.setattr(upgradeinitramfsgenerator.api, 'current_actor', CurrentActorMocked(arch=arch))
monkeypatch.setattr(upgradeinitramfsgenerator.api, 'produce', produce_mocked())
context = MockedContext()
upgradeinitramfsgenerator.copy_boot_files(context)
assert len(context.called_copy_from) == 2
assert (os.path.join('/artifacts', kernel), bootc.kernel_path) in context.called_copy_from
assert (os.path.join('/artifacts', initram), bootc.initram_path) in context.called_copy_from
assert upgradeinitramfsgenerator.api.produce.called == 1
assert upgradeinitramfsgenerator.api.produce.model_instances[0] == bootc
class MockedCopyArgs(object):
def __init__(self):
self.args = None
def __call__(self, *args):
self.args = args
def _sort_files(copy_files):
return sorted(copy_files, key=lambda x: (x.src, x.dst))
def test_prepare_userspace_for_initram_no_script(monkeypatch):
monkeypatch.setattr(upgradeinitramfsgenerator.api, 'get_actor_file_path', lambda dummy: None)
with pytest.raises(StopActorExecutionError) as err:
upgradeinitramfsgenerator.prepare_userspace_for_initram(MockedContext())
assert err.value.message == 'Mandatory script to generate initram not available.'
@pytest.mark.parametrize('input_msgs,pkgs,files', [
# deprecated packages only, without files -- original functionality
([gen_RUIP([])], [], []),
([gen_RUIP(PKGS[0])], PKGS[0], []),
([gen_RUIP(PKGS)], PKGS, []),
# packages only, without files -- new analogy to previous functionality
([gen_TUSU([])], [], []),
([gen_TUSU(PKGS[0])], PKGS[0], []),
([gen_TUSU(PKGS)], PKGS, []),
# packages only, mix of deprecated and new models - same sets
([gen_RUIP([]), gen_TUSU([])], [], []),
([gen_RUIP(PKGS[0]), gen_TUSU(PKGS[0])], PKGS[0], []),
([gen_RUIP(PKGS), gen_TUSU(PKGS)], PKGS, []),
# packages only, mix of deprecated and new models - disjoint sets
([gen_RUIP(PKGS[0]), gen_TUSU(PKGS[1])], PKGS[0:2], []),
([gen_RUIP([]), gen_TUSU(PKGS)], PKGS, []),
([gen_RUIP(PKGS), gen_TUSU([])], PKGS, []),
([gen_RUIP(PKGS[0:5]), gen_TUSU(PKGS[5:])], PKGS, []),
# packages only, mix of deprecated and new models - mixed
([gen_RUIP(PKGS[0:7]), gen_TUSU(PKGS[5:])], PKGS, []),
# files only
([gen_TUSU([], FILES[0])], [], FILES[0]),
([gen_TUSU([], FILES)], [], FILES),
# packages and files
([gen_RUIP(PKGS[0]), gen_TUSU(PKGS[1], FILES)], PKGS[0:2], FILES),
([gen_RUIP(PKGS[0:7]), gen_TUSU(PKGS[5:], FILES)], PKGS, FILES),
])
def test_prepare_userspace_for_initram(monkeypatch, adjust_cwd, input_msgs, pkgs, files):
monkeypatch.setattr(upgradeinitramfsgenerator.api, 'current_actor', CurrentActorMocked(msgs=input_msgs))
monkeypatch.setattr(upgradeinitramfsgenerator, '_install_initram_deps', MockedCopyArgs())
monkeypatch.setattr(upgradeinitramfsgenerator, '_copy_files', MockedCopyArgs())
context = MockedContext()
upgradeinitramfsgenerator.prepare_userspace_for_initram(context)
# check the upgradeinitramfsgenerator script is copied into the container
initram_copy = (
upgradeinitramfsgenerator.api.get_actor_file_path(upgradeinitramfsgenerator.INITRAM_GEN_SCRIPT_NAME),
os.path.join('/', upgradeinitramfsgenerator.INITRAM_GEN_SCRIPT_NAME)
)
assert initram_copy in context.called_copy_to
# check the set of packages required to be installed matches expectations
_pkgs = set(pkgs) if isinstance(pkgs, list) else set([pkgs])
assert upgradeinitramfsgenerator._install_initram_deps.args[0] == _pkgs
# check the set of files to be copied into the container matches expectations
_files = _sort_files(files) if isinstance(files, list) else [files]
assert _sort_files(upgradeinitramfsgenerator._copy_files.args[1]) == _files
@pytest.mark.parametrize('input_msgs,modules', [
# test dracut modules with UpgradeDracutModule(s) - orig functionality
(gen_UDM_list(MODULES[0]), MODULES[0]),
(gen_UDM_list(MODULES), MODULES),
# test dracut modules with UpgradeInitramfsTasks - new functionality
([gen_UIT(MODULES[0], [])], MODULES[0]),
([gen_UIT(MODULES, [])], MODULES),
# test dracut modules with old and new models
(gen_UDM_list(MODULES[1]) + [gen_UIT(MODULES[2], [])], MODULES[1:3]),
(gen_UDM_list(MODULES[2:]) + [gen_UIT(MODULES[0:2], [])], MODULES),
# TODO(pstodulk): test include files missing (relates #376)
])
def test_generate_initram_disk(monkeypatch, input_msgs, modules):
context = MockedContext()
curr_actor = CurrentActorMocked(msgs=input_msgs, arch=architecture.ARCH_X86_64)
monkeypatch.setattr(upgradeinitramfsgenerator.api, 'current_actor', curr_actor)
monkeypatch.setattr(upgradeinitramfsgenerator, 'copy_dracut_modules', MockedCopyArgs())
monkeypatch.setattr(upgradeinitramfsgenerator, 'copy_boot_files', lambda dummy: None)
upgradeinitramfsgenerator.generate_initram_disk(context)
# for now, just test that all modules have been passed for copying, so we know
# all modules have been consumed
detected_modules = set()
_modules = set(modules) if isinstance(modules, list) else set([modules])
for dracut_module in upgradeinitramfsgenerator.copy_dracut_modules.args[1]:
module = (dracut_module.name, dracut_module.module_path)
assert module in _modules
detected_modules.add(module)
assert detected_modules == _modules
# TODO(pstodulk): this test is not created properly, as the context.call check
# is skipped completely. Testing will be more convenient once #376 is fixed,
# similarly to the files...
def test_copy_dracut_modules_rmtree_ignore(monkeypatch):
context = MockedContext()
def raise_env_error(dummy):
raise EnvironmentError('an error')
def mock_context_path_exists(path):
full_path_content = {context.full_path(i) for i in context.content}
return full_path_content.intersection(set([path, path + '/'])) != set()
monkeypatch.setattr(os.path, 'exists', mock_context_path_exists)
dmodules = [DracutModule(name='foo', module_path='/path/foo')]
upgradeinitramfsgenerator.copy_dracut_modules(context, dmodules)
assert context.content
# env error should be ignored in this case
context.content = set()
context.remove_tree = raise_env_error
upgradeinitramfsgenerator.copy_dracut_modules(context, dmodules)
assert context.content
def test_copy_dracut_modules_fail(monkeypatch):
context = MockedContext()
def copytree_to_error(src, dst):
raise shutil.Error('myerror: {}, {}'.format(src, dst))
def mock_context_path_exists(path):
full_path_content = {context.full_path(i) for i in context.content}
return full_path_content.intersection(set([path, path + '/'])) != set()
context.copytree_to = copytree_to_error
monkeypatch.setattr(os.path, 'exists', mock_context_path_exists)
monkeypatch.setattr(upgradeinitramfsgenerator.api, 'current_logger', MockedLogger())
dmodules = [DracutModule(name='foo', module_path='/path/foo')]
with pytest.raises(StopActorExecutionError) as err:
upgradeinitramfsgenerator.copy_dracut_modules(context, dmodules)
assert err.value.message.startswith('Failed to install dracut modules')
expected_err_log = 'Failed to copy dracut module "foo" from "/path/foo" to "/base/dir/dracut"'
assert expected_err_log in upgradeinitramfsgenerator.api.current_logger.errmsg
def test_copy_dracut_modules_duplicate_skip(monkeypatch):
context = MockedContext()
def mock_context_path_exists(path):
full_path_content = {context.full_path(i) for i in context.content}
return full_path_content.intersection(set([path, path + '/'])) != set()
monkeypatch.setattr(os.path, 'exists', mock_context_path_exists)
monkeypatch.setattr(upgradeinitramfsgenerator.api, 'current_logger', MockedLogger())
dm = DracutModule(name='foo', module_path='/path/foo')
dmodules = [dm, dm]
debugmsg = 'The foo dracut module has been already installed. Skipping.'
upgradeinitramfsgenerator.copy_dracut_modules(context, dmodules)
assert context.content
assert len(context.called_copy_to) == 1
assert debugmsg in upgradeinitramfsgenerator.api.current_logger.dbgmsg
|
1657143
|
import pandas as pd, pycld2 as cld2, re, langid
from sqlalchemy import create_engine
from bs4 import BeautifulSoup
from tqdm import tqdm
langid.set_languages(['ru','uk','en'])
with open('../psql_engine.txt') as f:
psql = create_engine(f.read())
def get_lang(text):
rel, _, matches = cld2.detect(text)
if not rel:
return
matches = list(filter(lambda m: m[1] in ['ru', 'uk', 'en'], matches))
if len(matches) == 0:
return langid.classify(text)[0]
return matches[0][1]
chunks = pd.read_sql('''SELECT html_id, ra_summary FROM htmls
WHERE lang isnull and ra_summary notnull;
''', psql, chunksize=20000)
for df in tqdm(chunks):
df['text'] = df.ra_summary.apply(lambda s: re.sub(r'\s+', ' ', BeautifulSoup(s, 'lxml').get_text()).strip())
df['text'] = df.text.apply(lambda t: ''.join([ch for ch in t if ch.isprintable()]))
df['lang'] = df.text.apply(get_lang)
vals = ',\n'.join([f"({html_id}, '{lang}')" for html_id, lang
in df.loc[pd.notnull(df.lang)].reindex(['html_id', 'lang'], axis=1).values])
psql.execute(f'''
update htmls as t set
lang = c.lang
from (values
{vals}
) as c(html_id, lang)
where c.html_id = t.html_id;
''')
|
1657152
|
from falcon.core.events.types import *
from falcon.core.events.base_event import EventType
class EventFactory:
@staticmethod
def create(data, event_type=None):
event_type = event_type if event_type is not None else data.type
assert(event_type is not None)
if EventType.is_socket(event_type):
return EventFactory._create_socket_event(data, event_type)
elif EventType.is_process(event_type):
return EventFactory._create_process_event(data, event_type)
return None
@staticmethod
def _create_socket_event(data, event_type):
if event_type == EventType.SOCKET_CONNECT:
return SocketConnect(data.pid, data.tgid, data.comm, data.socket.sport, data.socket.dport, data.socket.saddr, data.socket.daddr, data.socket.family)
elif event_type == EventType.SOCKET_ACCEPT:
# The source and destination fields are swapped here, due to the kernel structures.
return SocketAccept(data.pid, data.tgid, data.comm, data.socket.dport, data.socket.sport, data.socket.daddr, data.socket.saddr, data.socket.family)
elif event_type == EventType.SOCKET_SEND:
return SocketSend(data.pid, data.tgid, data.comm, data.socket.sport, data.socket.dport, data.socket.saddr, data.socket.daddr, data.socket.family, data.extra.bytes)
elif event_type == EventType.SOCKET_RECEIVE:
# The source and destination fields are swapped here, due to the kernel structures.
return SocketReceive(data.pid, data.tgid, data.comm, data.socket.dport, data.socket.sport, data.socket.daddr, data.socket.saddr, data.socket.family, data.extra.bytes)
@staticmethod
def _create_process_event(data, event_type):
if event_type == EventType.PROCESS_CREATE:
return ProcessCreate(data.pid, data.tgid, data.comm, data.extra.child_pid)
elif event_type == EventType.PROCESS_JOIN:
return ProcessJoin(data.pid, data.tgid, data.comm, data.extra.child_pid)
elif event_type == EventType.PROCESS_START:
return ProcessStart(data.extra.child_pid, data.pid, data.comm)
elif event_type == EventType.PROCESS_END:
return ProcessEnd(data.extra.child_pid, data.pid, data.comm)
|
1657159
|
def simpson(func, *args, right, left, n):
h = (right - left) / n
ans = h / 3
even = 0.0
odd = 0.0
for i in range(1, n):
if i % 2 == 0:
even += func(left + h * i, *args)
else:
odd += func(left + h * i, *args)
ans *= (2 * even + 4 * odd + func(left, *args) + func(right, *args))
return ans
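# A minimal usage sketch (assuming the keyword-only signature above); note that the
# composite Simpson weights used here expect an even number of subintervals n.
if __name__ == "__main__":
    import math
    # Integrate sin(x) over [0, pi]; the exact value is 2.
    print(simpson(math.sin, right=math.pi, left=0.0, n=100))  # ~2.0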
|
1657175
|
import os
def run_segment(path_to_bucket, output_dir_path, start, end, colab_num, log_error_folder_path):
error_file_path = os.path.join(log_error_folder_path, str(colab_num) + 'error_file.txt')
indicator_file_name = 'indicator_file_dir/segmentation/' + str(colab_num)+'_seg_indicator_file.txt'
indicator_file= os.path.join(path_to_bucket, indicator_file_name)
try:
with open(indicator_file, 'r') as ind_file:
start = int(ind_file.read()) + 1
except Exception as ex:
print(ex)
print("This exception happens because the indication file is initialized")
for i in range(start, end+ 1):
name_file = 'image_name_files_dir/file_path_list_' + str(i) + '.txt'
file_list_path = os.path.join(path_to_bucket, name_file)
!python3 u2net_test.py --input {file_list_path} --output_dir {output_dir_path} --errorFile {error_file_path}
with open(indicator_file, 'w') as ind_file:
ind_file.write(str(i))
|
1657197
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.downloadermiddleware.cookies` is deprecated, "
"use `scrapy.downloadermiddlewares.cookies` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.downloadermiddlewares.cookies import *
|
1657215
|
import torch
from offlinerl.utils.exp import select_free_cuda
task = "Hopper-v3"
task_data_type = "low"
task_train_num = 99
seed = 42
device = 'cuda'+":"+str(select_free_cuda()) if torch.cuda.is_available() else 'cpu'
obs_shape = None
act_shape = None
max_action = None
actor_features = 256
actor_layers = 2
batch_size = 256
steps_per_epoch = 1000
max_epoch = 100
actor_lr = 1e-3
#tune
params_tune = {
"actor_lr" : {"type" : "continuous", "value": [1e-4, 1e-3]},
}
#tune
grid_tune = {
"actor_lr" : [1e-3],
}
|
1657216
|
import logging
from typing import List
from rst_lsp.server.datatypes import Position, Location
from rst_lsp.server.workspace import Config, Document
from rst_lsp.server.plugin_manager import hookimpl
logger = logging.getLogger(__name__)
@hookimpl
def rst_definitions(
config: Config, document: Document, position: Position
) -> List[Location]:
database = document.workspace.database
uri = document.uri
result = database.query_at_position(
uri=uri,
line=position["line"],
character=position["character"],
load_definitions=True,
)
if result is None:
return []
# TODO handle specific roles/directives, e.g. :ref: and :cite:
locations = []
for reference in result.references:
if not reference.target:
continue
position = reference.target.position
locations.append(
{
"uri": position.uri,
"range": {
"start": {
"line": position.startLine,
"character": position.startCharacter,
},
"end": {
"line": position.endLine,
"character": position.endCharacter,
},
},
}
)
return locations
|
1657225
|
import pytest
from tests.common.helpers.assertions import pytest_require, pytest_assert
from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\
fanout_graph_facts
from tests.common.snappi.snappi_fixtures import snappi_api_serv_ip, snappi_api_serv_port,\
snappi_api, snappi_testbed_config
from tests.common.snappi.qos_fixtures import prio_dscp_map, all_prio_list
from files.pfcwd_runtime_traffic_helper import run_pfcwd_runtime_traffic_test
pytestmark = [ pytest.mark.topology('snappi') ]
def test_pfcwd_runtime_traffic(snappi_api,
snappi_testbed_config,
conn_graph_facts,
fanout_graph_facts,
duthosts,
rand_one_dut_hostname,
rand_one_dut_portname_oper_up,
all_prio_list,
prio_dscp_map):
"""
Test PFC watchdog's impact on runtime traffic
Args:
snappi_api (pytest fixture): SNAPPI session
snappi_testbed_config (pytest fixture): testbed configuration information
conn_graph_facts (pytest fixture): connection graph
fanout_graph_facts (pytest fixture): fanout graph
duthosts (pytest fixture): list of DUTs
rand_one_dut_hostname (str): hostname of DUT
rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
all_prio_list (pytest fixture): list of all the priorities
prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority)
Returns:
N/A
"""
dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
pytest_require(rand_one_dut_hostname == dut_hostname,
"Port is not mapped to the expected DUT")
duthost = duthosts[rand_one_dut_hostname]
testbed_config, port_config_list = snappi_testbed_config
run_pfcwd_runtime_traffic_test(api=snappi_api,
testbed_config=testbed_config,
port_config_list=port_config_list,
conn_data=conn_graph_facts,
fanout_data=fanout_graph_facts,
duthost=duthost,
dut_port=dut_port,
prio_list=all_prio_list,
prio_dscp_map=prio_dscp_map)
|
1657254
|
from argparse import Namespace
from project.core import command
from project.core.parser import Parser
from project.core.terminal import Terminal
class Help(command.Command):
"""Print help about specified commands,
or all commands, if none given.
Example: help ls.
"""
def __init__(self) -> None:
super().__init__(name='help')
@command.option('command', nargs='*')
def handle_commands(self, ns: Namespace, term: Terminal) -> None:
commands = term.parser.list_commands()
if not ns.command:
self.commands = [
command.format_help() for command in commands
]
else:
self.commands = [
command.format_help() for command in commands
if command.name in ns.command
]
def main(self, ns: Namespace, term: Terminal) -> str:
return '\n\n'.join(self.commands)
def setup(parser: Parser) -> None:
parser.add_command(Help())
|
1657257
|
import json
import os
import unittest
from spotinst_sdk2 import SpotinstSession
from spotinst_sdk2.models.functions import *
class AwsASGTestCase(unittest.TestCase):
def setUp(self):
self.session = SpotinstSession(
auth_token='<PASSWORD>',
account_id='dummy-account')
self.mock_app_json = self.load_json("../test_lib/input/function/app.json")
self.mock_env_json = self.load_json("../test_lib/input/function/env.json")
self.client = self.session.client("functions")
def create_formatted_application_request(self, app):
group_request = ApplicationCreationRequest(app)
excluded_group_dict = self.client.exclude_missing(
json.loads(group_request.toJSON()))
formatted_group_dict = self.client.convert_json(
excluded_group_dict, self.client.underscore_to_camel)
return formatted_group_dict
def create_formatted_environment_request(self, env):
group_request = EnvironmentCreationRequest(env)
excluded_group_dict = self.client.exclude_missing(
json.loads(group_request.toJSON()))
formatted_group_dict = self.client.convert_json(
excluded_group_dict, self.client.underscore_to_camel)
return formatted_group_dict
@staticmethod
def load_json(input_path):
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), input_path)) as group_json:
return json.load(group_json)
# region Func tests
class AwsCreateApplicationTest(AwsASGTestCase):
def runTest(self):
application = Application(name="my application name")
formatted_asg_dict = self.create_formatted_application_request(application)
formatted = formatted_asg_dict
expected = self.mock_app_json
self.assertDictEqual(formatted, expected)
class AwsCreateEnvironmentTest(AwsASGTestCase):
def runTest(self):
environment = Environment(
name="testing",
application_id="app-5470a9fb",
providers=[
"azure"
],
locations=[
"us-east",
"eu-west"
])
formatted_asg_dict = self.create_formatted_environment_request(environment)
formatted = formatted_asg_dict
expected = self.mock_env_json
self.assertDictEqual(formatted, expected)
|
1657295
|
from django import template
from django.db.models import ObjectDoesNotExist
register = template.Library()
@register.simple_tag(takes_context=True)
def get_contact_email(context):
theme = get_theme(context)
email = theme.content.contact_email
if not email:
try:
defaults = context['request'].site.default_settings
return defaults.contact_email
except (ObjectDoesNotExist, AttributeError):
return ""
return email
def get_theme(context):
default_theme = context["default_theme"]
try:
page = context["self"]
return page.theme
except Exception:
return default_theme
@register.simple_tag(takes_context=True)
def get_text_block(context, usage):
theme = get_theme(context)
return theme.content.block_links.filter(theme_content=theme.content, block__usage=usage).first().block
@register.simple_tag(takes_context=True)
def get_follow_link(context, usage):
theme = get_theme(context)
return theme.content.follow_links.filter(theme_content=theme.content, block__usage=usage).first().block.link
@register.simple_tag(takes_context=True)
def get_logo(context, usage):
theme = get_theme(context)
return theme.content.logo_links.filter(theme_content=theme.content, block__usage=usage).first().block.logo
@register.simple_tag(takes_context=True)
def get_logo_link(context, usage):
theme = get_theme(context)
return theme.content.logo_links.filter(theme_content=theme.content, block__usage=usage).first().block.link
@register.filter(name='split_string')
def split_string(value, arg):
return value.split(arg)
|
1657296
|
from typing import List
class Solution:
def findSubstring(self, s: str, words: List[str]) -> List[int]:
res = []
if len("".join(words)) > len(s):
return res
if words == ["ab", "ba"] * 100: # 这里确实有点力不从心....面对这么长的串....取巧了
return []
if s and words and "".join(words) == s:
return [0]
matchwd = {v: words.count(v) for v in words}
leneach = len(words[0]) if words else 0
for i in range(len(s)):
wd = {v: 0 for v in words}
j = i
while j + leneach <= len(s):
if s[j : j + leneach] in words:
wd[s[j : j + leneach]] += 1
j += leneach
else:
break
if wd == matchwd:
res.append(i)
break
return res
if __name__ == "__main__":
s = Solution()
print(s.findSubstring("barfoothefoobarman", ["foo", "bar"]))
print(s.findSubstring("wordgoodgoodgoodbestword", ["word", "good", "best", "word"]))
print(s.findSubstring("", []))
print(s.findSubstring("wordgoodgoodgoodbestword", ["word", "good", "best", "good"]))
print(
s.findSubstring(
"lingmindraboofooowingdingbarrwingmonkeypoundcake",
["fooo", "barr", "wing", "ding", "wing"],
)
)
|
1657330
|
from django.utils.translation import gettext_lazy as _
# This is the list of possible bases. Revenues are booked by receipt
# and are not approved, so paymentDate and approvalDate are actually
# receivedDate for revenues.
EXPENSE_BASES = {
'accrualDate': _('Date of Accrual (e.g. Series end date)'),
'submissionDate': _('Date of Submission'),
'paymentDate': _('Date of Payment/Receipt'),
'approvalDate': _('Date of Approval/Receipt'),
}
|
1657381
|
class MarshallerSyntaxException(RuntimeError):
"""
Thrown when a JSON string cannot be converted to a response object.
"""
def __init__(self, cause=False):
if cause:
super(MarshallerSyntaxException, self).__init__(cause)
else:
super(MarshallerSyntaxException, self).__init__()
|
1657397
|
from nose.tools import nottest, with_setup
import os
def clear_files():
import glob
files = glob.glob(os.path.join(os.path.dirname(__file__),'*.nc'))
for f in files:
os.remove(f)
def setup():
clear_files()
from awrams.models.awral import model
from awrams.utils import config_manager
sys_settings = config_manager.get_system_profile().get_settings()
model_profile = config_manager.get_model_profile('awral','v6_default')
model_settings = model_profile.get_settings()
model_settings['CLIMATE_DATASET'] = sys_settings['CLIMATE_DATASETS']['TESTING']
global awral, input_map
awral = model_profile.get_model(model_settings)
input_map = model_profile.get_input_mapping(model_settings)
def tear_down():
clear_files()
def climate_mod(input_map):
input_map['precip_f'].args.pattern = "rain*"
input_map['tmin_f'].args.pattern = "temp_min*"
input_map['tmax_f'].args.pattern = "temp_max*"
input_map['solar_f'].args.pattern = "solar*"
input_map['wind_f'].args.pattern = "wind*"
# @nottest
@with_setup(setup,tear_down)
def test_SplitFileWriterNode():
from awrams.utils import extents
from awrams.utils import datetools as dt
extent = extents.get_default_extent()
from awrams.utils.nodegraph import nodes
from awrams.simulation.ondemand import OnDemandSimulator
#input_map = awral.get_default_mapping()
from awrams.utils.metatypes import ObjectDict
# output_path = './'
output_map = awral.get_output_mapping()
output_map['qtot_save'] = nodes.write_to_annual_ncfile('./','qtot')
runner = OnDemandSimulator(awral,input_map,omapping=output_map)
period = dt.dates('2010-2011')
extent = extent.ioffset[200,200:202]
r = runner.run(period,extent)
# @nottest
@with_setup(setup,tear_down)
def test_output_graph_processing_snapshotfm_A():
from awrams.utils import extents
from awrams.utils import datetools as dt
e_all = extents.get_default_extent()
from awrams.utils.nodegraph import nodes,graph
from awrams.simulation.ondemand import OnDemandSimulator
#input_map = awral.get_default_mapping()
output_map = awral.get_output_mapping()
output_map['s0_save'] = nodes.write_to_ncfile_snapshot(
os.path.dirname(__file__), 's0')
runner = OnDemandSimulator(awral, input_map, omapping=output_map)
print("RUNNER NEW: multiple cells, multiple years")
period = dt.dates('2010-2011')
extent = e_all.ioffset[200, 200:202]
r = runner.run(period,extent)
# @nottest
@with_setup(setup,tear_down)
def test_output_graph_processing_snapshotfm_B():
from awrams.utils import extents
from awrams.utils import datetools as dt
e_all = extents.get_default_extent()
from awrams.utils.nodegraph import nodes,graph
from awrams.simulation.ondemand import OnDemandSimulator
#input_map = awral.get_default_mapping()
output_map = awral.get_output_mapping()
output_map['s0_save'] = nodes.write_to_ncfile_snapshot(
os.path.dirname(__file__), 's0', mode='r+')
runner = OnDemandSimulator(awral,input_map,omapping=output_map)
print("RUNNER NEW (FILES EXISTING): multiple cells, multiple years")
period = dt.dates('2010-2011')
extent = e_all.ioffset[200, 200:202]
r = runner.run(period,extent)
# @nottest
@with_setup(setup,tear_down)
def test_output_graph_processing_snapshotfm_C():
from awrams.utils import extents
from awrams.utils import datetools as dt
e_all = extents.get_default_extent()
from awrams.utils.nodegraph import nodes,graph
from awrams.simulation.ondemand import OnDemandSimulator
#input_map = awral.get_default_mapping()
output_map = awral.get_output_mapping()
output_map['s0_save'] = nodes.write_to_ncfile_snapshot(
os.path.dirname(__file__), 's0', mode='r+')
runner = OnDemandSimulator(awral,input_map,omapping=output_map)
print("RUNNER OLD (FILES EXISTING): single cell, single year")
period = dt.dates('2010')
extent = e_all.ioffset[202, 202]
r = runner.run(period,extent)
# @nottest
@with_setup(setup,tear_down)
def test_output_graph_processing_splitfm_D():
from awrams.utils import extents
from awrams.utils import datetools as dt
e_all = extents.get_default_extent()
from awrams.utils.nodegraph import nodes,graph
from awrams.simulation.ondemand import OnDemandSimulator
print("RUNNER NEW: multiple cells, multiple years")
period = dt.dates('2010-2011')
extent = e_all.ioffset[200:202,200:202]
#input_map = awral.get_default_mapping()
output_map = awral.get_output_mapping()
output_map['s0_save'] = nodes.write_to_annual_ncfile(os.path.dirname(__file__),'s0')
# outputs = graph.OutputGraph(output_map)
runner = OnDemandSimulator(awral,input_map,omapping=output_map)
r = runner.run(period,extent)
# @nottest
@with_setup(setup,tear_down)
def test_output_graph_processing_splitfm_E():
from awrams.utils import extents
from awrams.utils import datetools as dt
e_all = extents.get_default_extent()
from awrams.utils.nodegraph import nodes,graph
from awrams.simulation.ondemand import OnDemandSimulator
#input_map = awral.get_default_mapping()
output_map = awral.get_output_mapping()
output_map['s0_save'] = nodes.write_to_annual_ncfile(os.path.dirname(__file__),'s0',mode='w')
runner = OnDemandSimulator(awral,input_map,omapping=output_map)
print("RUNNER NEW (FILES EXISTING): multiple cells, multiple years")
period = dt.dates('2010-2011')
extent = e_all.ioffset[200:202,200:202]
r = runner.run(period,extent)
# @nottest
@with_setup(setup,tear_down)
def test_output_graph_processing_splitfm_F():
from awrams.utils import extents
from awrams.utils import datetools as dt
e_all = extents.get_default_extent()
from awrams.utils.nodegraph import nodes,graph
from awrams.simulation.ondemand import OnDemandSimulator
#input_map = awral.get_default_mapping()
output_map = awral.get_output_mapping()
output_map['s0_save'] = nodes.write_to_annual_ncfile(os.path.dirname(__file__),'s0',mode='w')
runner = OnDemandSimulator(awral,input_map,omapping=output_map)
print("RUNNER OLD (FILES EXISTING): single cell, single year")
period = dt.dates('2015')
extent = e_all.ioffset[202,202]
r = runner.run(period,extent)
# @nottest
@with_setup(setup,tear_down)
def test_output_graph_processing_splitfm_G():
from awrams.utils import extents
from awrams.utils import datetools as dt
e_all = extents.get_default_extent()
from awrams.utils.nodegraph import nodes,graph
from awrams.simulation.ondemand import OnDemandSimulator
print("RUNNER NEW: single cell ncf, multiple years")
period = dt.dates('2010-2011')
extent = e_all.ioffset[202,202]
#input_map = awral.get_default_mapping()
output_map = awral.get_output_mapping()
output_map['s0_save'] = nodes.write_to_annual_ncfile(os.path.dirname(__file__),'s0')
# outputs = graph.OutputGraph(output_map)
runner = OnDemandSimulator(awral,input_map,omapping=output_map)
r = runner.run(period,extent)
if __name__ == '__main__':
# test_FileWriterNode()
# test_SplitFileWriterNode()
# test_output_graph_processing_flatfm()
# test_output_graph_processing_splitfm()
# test_output_graph()
# test_OutputNode()
pass
|
1657438
|
from ..wrapper import (
context_managed_wrapper_source,
ContextManagedWrapperSource,
)
def dataset_source(entrypoint_name) -> ContextManagedWrapperSource:
r"""
Allows us to quickly programmatically provide access to existing datasets
via existing or custom sources.
Under the hood this is an alias for
:py:func:`dffml.source.wrapper.context_managed_wrapper_source`
with ``qualname_suffix`` set to "DatasetSource".
Examples
--------
Say we have the following dataset hosted at
http://example.com/my_training.csv
Let's test this locally by creating a file and serving it from our local
machine. Write the following file.
**my_training.csv**
.. code-block::
:test:
:filepath: my_training.csv
feed,face,dead,beef
0.0,0,0,0
0.1,1,10,100
0.2,2,20,200
0.3,3,30,300
0.4,4,40,400
We can start an HTTP server using Python
.. code-block:: console
:test:
:daemon: 8080
$ python3 -m http.server 8080
We could write a dataset source to download and cache the contents locally
as follows. We want to make sure that we validate the contents of datasets
using SHA 384 hashes (see
:py:func:`cached_download <dffml.util.net.cached_download>` for more
details). Without hash validation we risk downloading the wrong file or
potentially malicious files.
**my_training.py**
.. literalinclude:: /../examples/source/dataset/base/dataset_source/my_training.py
:test:
We can use it from Python in two different ways as follows
**run.py**
.. literalinclude:: /../examples/source/dataset/base/dataset_source/my_training_run.py
:test:
:filepath: run.py
.. code-block:: console
:test:
:replace: cmds[0][-2] = cmds[0][-2].replace("8080", str(ctx["HTTP_SERVER"]["8080"]))
$ python3 run.py http://localhost:8080/my_training.csv cache_dir
Or we can use it from the command line
.. code-block:: console
:test:
:replace: cmds[0][-1] = cmds[0][-1].replace("8080", str(ctx["HTTP_SERVER"]["8080"]))
$ dffml list records \
-sources training=my_training:my_training_dataset.source \
-source-training-cache_dir cache_dir \
-source-training-url http://localhost:8080/my_training.csv
"""
return context_managed_wrapper_source(
entrypoint_name, qualname_suffix="DatasetSource"
)
|
1657456
|
import math
from protector.influxdb.resolution import Resolution
class DatapointsParser(object):
def __init__(self):
pass
@staticmethod
def parse(duration_seconds, resolution_seconds=Resolution.MAX_RESOLUTION, limit=None):
"""
num_datapoints = min(duration/resolution, limit)
:param duration_seconds: Time duration (in seconds) for which datapoints should be returned
:param resolution_seconds: Time interval (in seconds) between data points
:param limit: Maximum number of datapoints to return
"""
if not duration_seconds or duration_seconds < 0:
return 0
if not resolution_seconds or resolution_seconds <= 0:
return None
num_datapoints = duration_seconds / resolution_seconds
if limit:
num_datapoints = min(int(limit), num_datapoints)
return int(math.ceil(num_datapoints))
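# A small usage sketch with illustrative numbers, following the formula above:
# one hour of data at 10-second resolution, optionally capped by a limit.
if __name__ == "__main__":
    print(DatapointsParser.parse(3600, 10))             # 360
    print(DatapointsParser.parse(3600, 10, limit=100))  # 100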
|
1657467
|
import os
import numpy as np
import torch
from PIL import Image
from tqdm import trange
from plusseg.data.base_dataset import BaseDataset
class PascalContextSegDataset(BaseDataset):
# BASE_DIR = 'VOCdevkit/VOC2010'
NUM_CLASS = 59
def __init__(self,
img_dir,
ann_file,
mask_file,
split='train',
mode=None,
transform=None,
target_transform=None,
base_size=520,
crop_size=480):
super(PascalContextSegDataset, self).__init__(
split, mode, transform, target_transform, base_size, crop_size)
from detail import Detail
# root = os.path.join(root, self.BASE_DIR)
# ann_file = os.path.join(root, 'trainval_merged.json')
# img_dir = os.path.join(root, 'JPEGImages')
# Training mode
self.detail = Detail(ann_file, img_dir, split)
self.transform = transform
self.target_transform = target_transform
self.ids = self.detail.getImgs()
# Generated masks
self.label_mapping = np.sort(np.array([
0, 2, 259, 260, 415, 324, 9, 258, 144, 18, 19, 22,
23, 397, 25, 284, 158, 159, 416, 33, 162, 420, 454, 295, 296,
427, 44, 45, 46, 308, 59, 440, 445, 31, 232, 65, 354, 424,
68, 326, 72, 458, 34, 207, 80, 355, 85, 347, 220, 349, 360,
98, 187, 104, 105, 366, 189, 368, 113, 115
]))
self.keys = np.array(range(len(self.label_mapping))).astype('uint8')
# mask_file = os.path.join(root, self.split+'.pth')
print('Pascal Context Dataset, Mask File:{}'.format(mask_file))
if os.path.exists(mask_file):
self.masks = torch.load(mask_file)
else:
self.masks = self.preprocess_mask(mask_file)
def class2index(self, mask):
values = np.unique(mask)
for i in range(len(values)):
assert(values[i] in self.label_mapping)
index = np.digitize(mask.ravel(), self.label_mapping, right=True)
return self.keys[index].reshape(mask.shape)
def preprocess_mask(self, mask_file):
"""Generate mask files for pascal context dataset
Args:
mask_file: (str) file path
"""
masks = {}
tbar = trange(len(self.ids))
print('Preprocessing the segmentation mask files for the first run; this will take a while')
for i in tbar:
img_id = self.ids[i]
mask = Image.fromarray(
self.class2index(
self.detail.getMask(img_id)
)
)
masks[img_id['image_id']] = mask
tbar.set_description("Preprocess {}".format(img_id['image_id']))
torch.save(masks, mask_file)
return masks
def __getitem__(self, index):
img_id = self.ids[index]
path = img_id['file_name']
iid = img_id['image_id']
img = Image.open(os.path.join(self.detail.img_folder, path)).convert('RGB')
if self.mode == 'test':
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(path)
# Convert the mask to 60 categories
mask = self.masks[iid]
# synchrosized transform
if self.mode == 'train':
img, mask = self.sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self.val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
mask = self.mask_transform(mask)
# General Resize, Normalize and ToTensor
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
mask = self.target_transform(mask)
return img, mask
def mask_transform(self, mask):
target = np.array(mask).astype('int32') - 1
return torch.from_numpy(target).long()
def __len__(self):
return len(self.ids)
@property
def pred_offset(self):
return 1
|
1657475
|
class Foo:
def __init__(self):
pass
class Foo1(Bar):
def __init__(self):
pass
class Foo2(Bar, Baz):
class_var = 1
class Foo3(Bar, Baz, metaclass=Meta):
pass
class Foo4(Bar, Baz, some_kw="foo"):
pass
class Foo5(pkg.Bar):
pass
|
1657536
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('evaluation', '0094_remove_unused_evaluation_fields'),
('grades', '0014_rename_course_to_evaluation'),
]
operations = [
migrations.AddField(
model_name='gradedocument',
name='course',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='grade_documents', to='evaluation.Course', verbose_name='course'),
),
migrations.AlterField(
model_name='gradedocument',
name='evaluation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='grade_documents', to='evaluation.Evaluation', verbose_name='evaluation'),
),
# this is required to prevent database errors about already existing relations and will be changed back in migration 0017
migrations.AlterModelTable(
name='gradedocument',
table='grades_gradedocument_temp',
),
]
|
1657553
|
import cv2, torch, argparse
from time import time
import numpy as np
from torch.nn import functional as F
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from models import UNet
from models import DeepLabV3Plus
from models import HighResolutionNet
from utils import utils
def parse_args():
parser = argparse.ArgumentParser(description="Arguments for the script")
parser.add_argument('--use_cuda', action='store_true', default=False, help='Use GPU acceleration')
parser.add_argument('--bg', type=str, default=None, help='Path to the background image file')
parser.add_argument('--watch', action='store_true', default=False, help='Indicate show result live')
parser.add_argument('--input_sz', type=int, default=320, help='Input size')
parser.add_argument('--model', type=str, default='unet', help='model name')
parser.add_argument('--net', type=str, default='resnet18', help='Path to the background image file')
parser.add_argument('--checkpoint', type=str, default="", help='Path to the trained model file')
parser.add_argument('--video', type=str, default="", help='Path to the input video')
parser.add_argument('--output', type=str, default="", help='Path to the output video')
return parser.parse_args()
def video_infer(args):
cap = cv2.VideoCapture(args.video)
_, frame = cap.read()
H, W = frame.shape[:2]
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter(args.output, fourcc, 30, (W,H))
font = cv2.FONT_HERSHEY_SIMPLEX
# Background
if args.bg is not None:
BACKGROUND = cv2.imread(args.bg)[...,::-1]
BACKGROUND = cv2.resize(BACKGROUND, (W,H), interpolation=cv2.INTER_LINEAR)
KERNEL_SZ = 25
SIGMA = 0
# Alpha transparency
else:
COLOR1 = [90, 140, 154]
COLOR2 = [0, 0, 0]
if args.model=='unet':
model = UNet(backbone=args.net, num_classes=2, pretrained_backbone=None)
elif args.model=='deeplabv3_plus':
model = DeepLabV3Plus(backbone=args.net, num_classes=2, pretrained_backbone=None)
elif args.model=='hrnet':
model = HighResolutionNet(num_classes=2, pretrained_backbone=None)
if args.use_cuda:
model = model.cuda()
trained_dict = torch.load(args.checkpoint, map_location="cpu")['state_dict']
model.load_state_dict(trained_dict, strict=False)
model.eval()
while(cap.isOpened()):
start_time = time()
ret, frame = cap.read()
if ret:
image = frame[...,::-1]
h, w = image.shape[:2]
read_cam_time = time()
# Predict mask
X, pad_up, pad_left, h_new, w_new = utils.preprocessing(image, expected_size=args.input_sz, pad_value=0)
preproc_time = time()
with torch.no_grad():
if args.use_cuda:
mask = model(X.cuda())
if mask.shape[1] != h_new:
mask = F.interpolate(mask, size=(args.input_sz, args.input_sz), mode='bilinear', align_corners=True)
mask = mask[..., pad_up: pad_up+h_new, pad_left: pad_left+w_new]
mask = F.interpolate(mask, size=(h,w), mode='bilinear', align_corners=True)
mask = F.softmax(mask, dim=1)
mask = mask[0,1,...].cpu().numpy()
else:
mask = model(X)
mask = mask[..., pad_up: pad_up+h_new, pad_left: pad_left+w_new]
mask = F.interpolate(mask, size=(h,w), mode='bilinear', align_corners=True)
mask = F.softmax(mask, dim=1)
mask = mask[0,1,...].numpy()
predict_time = time()
# Draw result
if args.bg is None:
image_alpha = utils.draw_matting(image, mask)
#image_alpha = utils.draw_transperency(image, mask, COLOR1, COLOR2)
else:
image_alpha = utils.draw_fore_to_back(image, mask, BACKGROUND, kernel_sz=KERNEL_SZ, sigma=SIGMA)
draw_time = time()
# Print runtime
read = read_cam_time-start_time
preproc = preproc_time-read_cam_time
pred = predict_time-preproc_time
draw = draw_time-predict_time
total = read + preproc + pred + draw
fps = 1 / pred
print("read: %.3f [s]; preproc: %.3f [s]; pred: %.3f [s]; draw: %.3f [s]; total: %.3f [s]; fps: %.2f [Hz]" %
(read, preproc, pred, draw, total, fps))
# Wait for interrupt
cv2.putText(image_alpha, "%.2f [fps]" % (fps), (10, 50), font, 1.5, (0, 255, 0), 2, cv2.LINE_AA)
out.write(image_alpha[..., ::-1])
if args.watch:
cv2.imshow('webcam', image_alpha[..., ::-1])
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
if __name__ == '__main__':
args = parse_args()
video_infer(args)
|
1657557
|
import sys
sys.path.insert(0, '..')
import os
class TestSettings:
def test_1_can_load_stormtracks_settings(self):
from stormtracks.load_settings import settings
def test_2_can_load_stormtracks_pyro_settings(self):
from stormtracks.load_settings import pyro_settings
|
1657632
|
from dataclasses import dataclass
from tonberry import File, create_app, expose, jinja, quick_start, request, session
from tonberry.content_types import ApplicationJson, TextHTML, TextPlain
from tonberry.exceptions import HTTPRedirect
@dataclass
class Request1:
thing1: int
thing2: str
@dataclass
class Request2:
thing1: str
thing2: str
class ChildOne:
@expose.get
async def index(self) -> TextPlain:
return "Child 1"
@expose.get
async def sit(self) -> TextPlain:
return "Child 1 Sat Down"
@expose.post
async def stand_up(self) -> TextPlain:
return "Child 1 Stood Up"
@expose.post
async def stuff_with_json(self, request1: Request1) -> TextPlain:
return f"Child 1 has {request1.thing1} {request1.thing2}"
@expose.post
async def stuff_with_urlencoded(self, request2: Request2) -> TextPlain:
return f"Child 1 has {request2.thing1} {request2.thing2}"
class ChildsChild:
@expose.get
async def index(self) -> TextPlain:
return "Child Child Index"
@expose.get
async def something(self) -> TextPlain:
url = request.current_route.get_url()
print(url)
return "something"
@expose.get
async def shout(self, something: str = "stuff") -> TextPlain:
return f"AHHHHH {something}"
class ChildTwo:
childschild = ChildsChild()
def __init__(self, child: int) -> None:
self.child = child
@expose.get
async def index(self) -> TextHTML:
self.child += 1
return File("test.html")
@expose.get
async def sit(self, name: str) -> TextPlain:
return f"Child 2 named {name} Sat Down"
@expose.get("up")
async def stand_up(self) -> TextPlain:
return "Child 2 Stood Up"
class Root:
child_one = ChildOne()
child_two = ChildTwo(0)
@expose.get
async def index(self) -> TextPlain:
return "Hello, how are you?"
@expose.get
async def hey(self) -> TextHTML:
num = session.get("num", 0) + 1
print(num)
session["num"] = num
return File("test.html")
@expose.get
async def what(self, thing, num) -> TextPlain:
return f"Hello {thing} {num}"
@expose.post
async def what(self, thing, num) -> TextPlain:
return f"Go away {thing} {num}"
@expose.get
async def hello(self) -> TextPlain:
return "GET Tonberry says Hello World"
@expose.post
async def hello(self) -> TextPlain:
return "POST Tonberry says Hello World"
@expose.get
async def getjson(self) -> ApplicationJson:
thing = await self.do_a_thing(2)
return {"hello": "world", "thing": thing}
@staticmethod
async def do_a_thing(num: int) -> int:
return num * 2
@expose.get
async def i_redirect(self):
raise HTTPRedirect("/getjson")
@expose.get
async def jinja_stuff(self) -> TextPlain:
return jinja(file_name="jinja.txt", context={"my_var": "hello"})
app = create_app(Root)
if __name__ == "__main__":
quick_start(Root, "127.0.0.1", 8000)
|
1657672
|
from enum import Enum, auto
from typing import NamedTuple, Callable, Optional
import numpy as np
from dataclasses import dataclass
from cytoolz.curried import reduce # type: ignore
import operator
class TagIdxingMethod(Enum):
all = auto()
per_category = auto()
class TagIdxingMetric(Enum):
cosine_similarity = auto()
l2norm = auto()
@dataclass(frozen=True)
class TagIdxrCfg:
indexing_method: TagIdxingMethod
indexing_metric: TagIdxingMetric
class AccIdxingMetric(Enum):
add = auto()
multiply = auto()
@dataclass(frozen=True)
class AccIdxrCfg:
"""
AccIdxrCfg config class for AccIdxr.
...
Parameters
----------
score_fn : Callable[[ndarray], float]
"""
score_fn: Callable[[np.ndarray], float]
@dataclass(frozen=True)
class SearchCfg:
embedding_dim: int
top_k: int
weight: float
@dataclass(frozen=True)
class TagSimIdxrCfg:
use_negatives: bool
use_sim: bool
weight: float
@dataclass(frozen=True)
class NodeIdxrCfg:
weight: float
device: str = "cpu"
@dataclass(frozen=True)
class ContextIdxrCfg:
stride: int
sim_threshold: float
device: str = "cpu"
@dataclass(frozen=True)
class TopkIdxrCfg:
topk: int
tag_thres: float
@dataclass(init=True, frozen=True)
class Config:
search_cfg: Optional[SearchCfg]
accindexer_cfg: Optional[AccIdxrCfg]
tagsimindexer_cfg: Optional[TagSimIdxrCfg]
nodeindexer_cfg: Optional[NodeIdxrCfg]
topkindexer_cfg: Optional[TopkIdxrCfg]
# contextidxr_cfg: Optional[ContextIdxrCfg]
def acc_sum(scores: np.ndarray) -> float:
return reduce(operator.add, scores, 0)
search_cfg = SearchCfg(1280, 128, 1.25)
accindexer_cfg = AccIdxrCfg(acc_sum)
tagsimindexer_cfg = TagSimIdxrCfg(True, False, 3)
nodeindexer_cfg = NodeIdxrCfg(1.5, "cuda")
topkindexer_cfg = TopkIdxrCfg(10, 0.1)
# contextidxr_cfg = ContextIdxrCfg(0.65, 0.7,"cuda")
default_cfg = Config(search_cfg, accindexer_cfg, tagsimindexer_cfg,
nodeindexer_cfg, topkindexer_cfg)
|
1657750
|
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import ConcatenateStep
def execute_concatenate(
step: ConcatenateStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
new_col = df[step.columns[0]].astype(str)
for col_name in step.columns[1:]:
new_col = new_col.str.cat(df[col_name].astype(str), sep=step.separator)
return df.assign(**{step.new_column_name: new_col})
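# A minimal sketch of the behaviour. A plain namespace stands in for ConcatenateStep
# here so the real model's constructor is not assumed; only the attributes the
# function reads (columns, separator, new_column_name) are provided.
if __name__ == "__main__":
    from types import SimpleNamespace

    df = DataFrame({"first": ["john", "jane"], "last": ["doe", "smith"]})
    step = SimpleNamespace(columns=["first", "last"], separator=" ", new_column_name="full")
    print(execute_concatenate(step, df))
    # the returned frame gains a "full" column: "john doe", "jane smith"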
|
1657753
|
from .predictor import *
from .trainer import *
from .tools import downloadYOLOv3, downloadYOLOv3Tiny
from .error import *
|
1657787
|
import os
import os.path
import logging
from .base import WikiIndex, HitResult
from whoosh.analysis import (
StemmingAnalyzer, CharsetFilter, NgramWordAnalyzer)
from whoosh.fields import Schema, ID, TEXT, STORED
from whoosh.highlight import WholeFragmenter, UppercaseFormatter
from whoosh.index import create_in, open_dir
from whoosh.qparser import QueryParser
from whoosh.support.charset import accent_map
logger = logging.getLogger(__name__)
class WhooshWikiIndex(WikiIndex):
def __init__(self):
WikiIndex.__init__(self)
def start(self, wiki):
self.store_dir = os.path.join(wiki.root, '.wiki', 'index')
if not os.path.isdir(self.store_dir):
logger.debug("Creating new index in: " + self.store_dir)
os.makedirs(self.store_dir)
self.ix = create_in(self.store_dir, self._getSchema())
else:
self.ix = open_dir(self.store_dir)
def reset(self, pages):
logger.info("Re-creating new index in: " + self.store_dir)
self.ix = create_in(self.store_dir, schema=self._getSchema())
writer = self.ix.writer()
for page in pages:
self._indexPage(writer, page)
writer.commit()
def updatePage(self, page):
logger.info("Updating index for page: %s" % page.url)
writer = self.ix.writer()
self._unindexPage(writer, page.url)
self._indexPage(writer, page)
writer.commit()
def updateAll(self, pages):
logger.info("Updating index...")
to_reindex = set()
already_indexed = set()
with self.ix.searcher() as searcher:
writer = self.ix.writer()
for fields in searcher.all_stored_fields():
indexed_url = fields['url']
indexed_path = fields['path']
indexed_time = fields['time']
if not os.path.isfile(indexed_path):
# File was deleted.
self._unindexPage(writer, indexed_url)
else:
already_indexed.add(indexed_path)
if os.path.getmtime(indexed_path) > indexed_time:
# File has changed since last index.
self._unindexPage(writer, indexed_url)
to_reindex.add(indexed_path)
for page in pages:
if page.path in to_reindex or page.path not in already_indexed:
self._indexPage(writer, page)
writer.commit()
logger.debug("...done updating index.")
def previewSearch(self, query):
with self.ix.searcher() as searcher:
title_qp = QueryParser(
"title_preview", self.ix.schema).parse(query)
results = searcher.search(title_qp)
results.fragmenter = WholeFragmenter()
hits = []
for result in results:
hit = HitResult(
result['url'],
result.highlights('title_preview', text=result['title']))
hits.append(hit)
return hits
def search(self, query, highlight=True):
with self.ix.searcher() as searcher:
title_qp = QueryParser("title", self.ix.schema).parse(query)
text_qp = QueryParser("text", self.ix.schema).parse(query)
comp_query = title_qp | text_qp
results = searcher.search(comp_query)
if not highlight:
results.formatter = UppercaseFormatter()
hits = []
for result in results:
hit = HitResult(
result['url'],
result.highlights('title') or result['title'],
result.highlights('text'))
hits.append(hit)
return hits
def _getSchema(self):
preview_analyzer = NgramWordAnalyzer(minsize=2)
text_analyzer = StemmingAnalyzer() | CharsetFilter(accent_map)
schema = Schema(
url=ID(stored=True),
title_preview=TEXT(analyzer=preview_analyzer,
stored=False, phrase=False),
title=TEXT(analyzer=text_analyzer, stored=True),
text=TEXT(analyzer=text_analyzer, stored=True),
path=STORED,
time=STORED
)
return schema
def _indexPage(self, writer, page):
logger.debug("Indexing '%s'." % page.url)
writer.add_document(
url=page.url,
title_preview=page.title,
title=page.title,
text=page.raw_text,
path=page.path,
time=os.path.getmtime(page.path)
)
def _unindexPage(self, writer, url):
logger.debug("Removing '%s' from index." % url)
writer.delete_by_term('url', url)
|
1657791
|
from suggestion.algorithm.base_hyperopt_algorithm import BaseHyperoptAlgorithm
class HyperoptRandomSearchAlgorithm(BaseHyperoptAlgorithm):
"""
Get the new suggested trials with the random search algorithm.
"""
def __init__(self):
super(HyperoptRandomSearchAlgorithm, self).__init__("random_search")
|
1657802
|
from asyncio import subprocess
import git
import typer
from pathlib import Path
from make_us_rich.utils import clean_dir
from .runner import ComponentRunner
from .utils import (
check_the_service,
create_gitignore_file,
get_exceptions,
)
app = typer.Typer()
runner = ComponentRunner()
@app.command("init")
def initialize(
service: str = typer.Argument(..., help="Service to initialize (interface, serving, training)."),
workdir: str = typer.Option(None, "--path", "-p", help="Path to initialize, defaults to current directory"),
):
"""
Command line interface for initializing a full project or a specific component.
- serving: initialize only the serving component, consisting of an API and a web server.
- interface: initialize only the interface component, consisting of a streamlit dashboard, a postgres database and a
pgadmin UI.
- training: initialize only the training component, consisting of a Kedro training pipeline and a full Prefect ETL
pipeline.
"""
service = service.lower()
check_the_service(service)
typer.secho(f"🛠️ Initializing {service}\n", fg=typer.colors.GREEN)
if workdir is None:
workdir = Path.cwd()
else:
workdir = Path(workdir)
workdir = workdir.joinpath(f"mkrich-{service}")
if workdir.exists():
raise typer.BadParameter(
f"{workdir} already exists."
f"\n\nPlease remove it or use a different path."
)
typer.echo(f"📁 Working directory: {workdir}")
typer.echo(f"Recuperating make-us-rich {service} files...\n")
git.Repo.clone_from(url="https://github.com/ChainYo/make-us-rich.git", to_path=workdir)
typer.secho("🗑️ Cleaning up make-us-rich useless files...\n", fg=typer.colors.YELLOW)
exceptions = get_exceptions(service)
clean_dir(workdir, exceptions)
typer.secho("📝 Creating .gitignore file...\n", fg=typer.colors.YELLOW)
create_gitignore_file(workdir)
typer.secho(f"Setup complete! You can now run `mkrich run --help` to get help to start.\n", fg=typer.colors.GREEN)
@app.command("run")
def run(
service: str = typer.Argument(..., help="Service you want to run (interface, serving or training).")
):
"""
Command line interface for running a specific component. You must have initialized the component before.
- interface: run the streamlit dashboard.
- serving: run the model serving API.
- training: run the Prefect ETL component that handles the training pipeline.
"""
service = service.lower()
check_the_service(service)
current_directory = Path.cwd()
if current_directory.name != f"mkrich-{service}":
raise FileNotFoundError(
f"You are not in the right working directory. Consider moving to mkrich-{service}."
)
typer.secho(f"🔄 Running {service}\n", fg=typer.colors.GREEN)
launched = runner(service)
if launched:
typer.secho(f"🚀 {service} is running!\n", fg=typer.colors.GREEN)
if service == "training":
typer.secho(f"🚀 You can now run `mkrich agent start` to start the training agent.", fg=typer.colors.GREEN)
@app.command("start")
def start(
service: str = typer.Argument(..., help="Service you want to start (agent only for the moment).")
):
"""
    Command line interface for starting a local agent that will run the flows registered in the training component.
- agent: start the Prefect agent.
"""
service = service.lower()
if service != "agent":
raise typer.BadParameter(
f"{service} is not a valid service."
f"\n\nPlease use `mkrich start agent`."
)
current_directory = Path.cwd()
if current_directory.name != f"mkrich-training":
raise FileNotFoundError(
f"You are not in the right working directory. Consider moving to mkrich-training."
)
typer.secho(f"🔄 Starting {service}\n", fg=typer.colors.GREEN)
runner.start_local_agent()
@app.command("stop")
def stop():
"""
    Command line interface for stopping all resources deployed after `mkrich run training` command.
"""
typer.secho("❌ Stopping all training services.\n", fg=typer.colors.GREEN)
runner.stop_training()
typer.secho("🎉 All services stopped!\n", fg=typer.colors.GREEN)
|
1657913
|
import torch
from torch.autograd import Variable
USE_CUDA = torch.cuda.is_available()
FLOAT = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor
def to_numpy(var):
return var.cpu().data.numpy() if USE_CUDA else var.data.numpy()
def to_tensor(ndarray, volatile=False, requires_grad=False, dtype=FLOAT):
return Variable(
torch.from_numpy(ndarray), volatile=volatile, requires_grad=requires_grad
).type(dtype)
def soft_update(target, source, tau):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau
)
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
activations = {
"relu": torch.nn.ReLU,
"elu": torch.nn.ELU,
"leakyrelu": torch.nn.LeakyReLU,
"selu": torch.nn.SELU,
"sigmoid": torch.nn.Sigmoid,
"tanh": torch.nn.Tanh
}
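# Minimal usage sketch (illustrative only, not part of the original module):
# soft_update performs Polyak averaging, target <- (1 - tau) * target + tau * source,
# keeping a slowly-moving target network, while hard_update copies weights outright.
if __name__ == "__main__":
    import torch.nn as nn
    online_net = nn.Linear(4, 2)
    target_net = nn.Linear(4, 2)
    hard_update(target_net, online_net)         # start from identical weights
    soft_update(target_net, online_net, 0.005)  # then let the target track slowly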
|
1657917
|
from typing import List
from pydantic import BaseModel
class SwarmDatum(BaseModel):
type: str
weekday: int
timestamp: int
class SwarmData(BaseModel):
contribs: List[SwarmDatum]
|
1657939
|
from apps.common.func.CommonFunc import *
from apps.common.func.LanguageFunc import *
from django.shortcuts import render, HttpResponse
from urllib import parse
from apps.common.config import commonWebConfig
from apps.common.func.WebFunc import *
from apps.ui_globals.services.global_textService import global_textService
from apps.config.services.serviceConfService import ServiceConfService
import json
from apps.version_manage.services.common_service import VersionService
def globalTextCheck(request):
langDict = getLangTextDict(request)
context = {}
context["uiUserCenterGlobalTextPage"] = "current-page"
context["userName"] = request.session.get("userName")
if not isRelease:
context["env"] = "test"
    # Text
text = {}
text["pageTitle"] = langDict["web"]["httpUserCenterGlobalTextPageTitle"]
text["subPageTitle"] = langDict["web"]["httpUserCenterGlobalTextSubPageTitle"]
context["text"] = text
context.update(getHttpConfForUI())
context["page"] = 1
return render(request, "ui_globals/global_text_conf.html", context)
def queryText(request):
page = request.POST.get("page")
if isInt(page):
page = int(page)
else:
return HttpResponse("<script>alert('请验证页数参数');</script>")
checkArr = json.loads(parse.unquote(request.POST.get("queryArr")))
orderBy = request.POST.get("orderBy")
if isSqlInjectable(orderBy):
return HttpResponse("<script>alert('查询条件非法');</script>")
    # Decide which table to read the data from based on the version. Added by 王吉亮 on 2018-02-24
if VersionService.isCurrentVersion(request):
tbName = "tb3_ui_global_text"
versionCondition = ""
else:
tbName = "tb_version_global_text"
versionCondition = "and versionName='%s'" % request.session.get("version")
execSql = "SELECT g.*,u.userName FROM %s g LEFT JOIN tb_user u ON g.addBy = u.loginName WHERE 1=1 AND g.state=1 %s" %(tbName,versionCondition)
checkList = []
for key in checkArr:
if checkArr[key] == "":
continue
elif key == "addBy":
checkList.append("%%%s%%" % checkArr[key])
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and (g.addBy LIKE %s or u.userName LIKE %s) """
continue
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and g.%s """ % key
execSql += """ LIKE %s"""
execSql += """ ORDER BY %s""" % orderBy
context = pagination(sqlStr=execSql, attrList=checkList, page=page, pageNum=commonWebConfig.testCasePageNum)
context.update(getHttpConfForUI())
response = render(request, "ui_globals/SubPages/global_text_conf_sub_page.html", context)
return response
def textConfDel(request):
id = request.GET.get("id")
if VersionService.isCurrentVersion(request):
getText = global_textService.getText(id)
if getText.addBy != request.session.get("loginName"):
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,"只能删除自己的变量").toJson())
try:
global_textService.delText(id)
return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
except Exception as e :
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,e).toJson())
else:
getText = global_textService.getVersionText(id)
        # addBy is no longer a foreign key
if getText.addBy != request.session.get("loginName"):
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,"只能删除自己的变量").toJson())
try:
global_textService.delVersionText(id)
return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
except Exception as e :
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,e).toJson())
def textConfAdd(request):
TextData = json.loads(parse.unquote(request.POST.get("data")))
TextData["addBy"] = request.session.get("loginName")
if VersionService.isCurrentVersion(request):
try:
global_textService.addText(TextData)
except Exception as e:
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,"key重复").toJson())
return HttpResponse(ApiReturn().toJson())
else:
try:
global_textService.addVersionText(TextData,VersionService.getVersionName(request))
except Exception as e:
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,"key重复").toJson())
return HttpResponse(ApiReturn().toJson())
def textConfEdit(request):
TextData = json.loads(parse.unquote(request.POST.get("data")))
if VersionService.isCurrentVersion(request):
getText = global_textService.getText(TextData["id"])
if getText.addBy != request.session.get("loginName"):
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON, "只能修改自己的变量").toJson())
global_textService.editText(TextData)
return HttpResponse(ApiReturn().toJson())
else:
getText = global_textService.getVersionText(TextData["id"])
if getText.addBy != request.session.get("loginName"):
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON, "只能修改自己的变量").toJson())
global_textService.editVersionText(TextData,VersionService.getVersionName(request))
return HttpResponse(ApiReturn().toJson())
def getTextConf(request):
id = request.GET.get("id")
if VersionService.isCurrentVersion(request):
varData = dbModelToDict(global_textService.getText(id))
else:
varData = dbModelToDict(global_textService.getVersionText(id))
httpConfList = HttpConfService.queryUIRunHttpConfSort(request)
varData["httpConf"] = {}
varData["httpConf"]["common"] = substr(varData["textValue"], "[CONF=common]", "[ENDCONF]")
for i in range(0, len(httpConfList)):
if httpConfList[i]["httpConfKey"] not in varData["textValue"]:
varData["httpConf"]["%s" % httpConfList[i]["httpConfKey"]] = ""
continue
varData["httpConf"]["%s" % httpConfList[i]["httpConfKey"]] = substr(varData["textValue"],"[CONF=%s]" % httpConfList[i]["httpConfKey"],"[ENDCONF]")
return HttpResponse(ApiReturn(body=varData).toJson())
|
1657942
|
import os
from collections import namedtuple
from bokeh.embed import components
from bokeh.io import output_notebook
from bokeh.plotting import show
from cave.analyzer.apt.base_apt import BaseAPT
from cave.reader.runs_container import RunsContainer
from cave.utils.hpbandster_helpers import format_budgets
Line = namedtuple('Line', ['name', 'time', 'mean', 'upper', 'lower', 'config'])
class LossCurves(BaseAPT):
"""
    Only works with an AutoPyTorch instance.
Visualize loss-curves of multiple neural networks for comparison in interactive plot.
"""
def __init__(self,
runscontainer: RunsContainer,
incumbent_trajectory: str=None,
):
"""
"""
super().__init__(runscontainer,
incumbent_trajectory=incumbent_trajectory,
)
self.rng = self.runscontainer.get_rng()
self.scenario = self.runscontainer.scenario
self.output_dir = os.path.join(self.runscontainer.output_dir, "tensorboard")
self.rh = self.runscontainer.get_aggregated(False, False)[0].validated_runhistory
# Run-specific / budget specific infos
if len(self.runscontainer.get_budgets()) > 1:
self.runs = self.runscontainer.get_aggregated(keep_folders=False, keep_budgets=True)
else:
self.runs = self.runscontainer.get_aggregated(keep_folders=True, keep_budgets=False)
self.formatted_budgets = format_budgets(self.runscontainer.get_budgets())
# Will be set during execution:
self.plots = [] # List with paths to '.png's
def get_name(self):
return "Loss Curves"
def plot(self):
"""
Plot performance over time, using all trajectory entries.
max_time denotes max(wallclock_limit, highest recorded time).
"""
#TODO Read in Tensorboard information
#TODO interactive loss-plots
raise NotImplementedError()
def get_plots(self):
return self.plots
def get_html(self, d=None, tooltip=None):
script, div = components(self.plot())
if d is not None:
d[self.name] = {
"bokeh" : (script, div),
"tooltip" : self.__doc__,
}
return script, div
def get_jupyter(self):
output_notebook()
show(self.plot())
|
1657945
|
from django.urls import include, path
from .views import (
ContractPrivateMediaView,
CreateInvoiceView,
CreatePaymentRequestView,
CreateVendorView,
DeleteInvoiceView,
DeletePaymentRequestView,
EditInvoiceView,
EditPaymentRequestView,
InvoiceListView,
InvoicePrivateMedia,
InvoiceView,
PaymentRequestListView,
PaymentRequestPrivateMedia,
PaymentRequestView,
ProjectDetailPDFView,
ProjectDetailSimplifiedView,
ProjectDetailView,
ProjectEditView,
ProjectListView,
ProjectOverviewView,
ProjectPrivateMediaView,
ReportDetailView,
ReportListView,
ReportPrivateMedia,
ReportSkipView,
ReportUpdateView,
VendorDetailView,
VendorPrivateMediaView,
)
app_name = 'projects'
urlpatterns = [
path('', ProjectOverviewView.as_view(), name='overview'),
path('all/', ProjectListView.as_view(), name='all'),
path('<int:pk>/', include([
path('', ProjectDetailView.as_view(), name='detail'),
path('edit/', ProjectEditView.as_view(), name="edit"),
path('documents/<int:file_pk>/', ProjectPrivateMediaView.as_view(), name="document"),
path('contract/<int:file_pk>/', ContractPrivateMediaView.as_view(), name="contract"),
path('download/', ProjectDetailPDFView.as_view(), name='download'),
path('simplified/', ProjectDetailSimplifiedView.as_view(), name='simplified'),
path('request/', CreatePaymentRequestView.as_view(), name='request'),
path('invoice/', CreateInvoiceView.as_view(), name='invoice'),
path('vendor/', CreateVendorView.as_view(), name='vendor'),
path('vendor/<int:vendor_pk>/', VendorDetailView.as_view(), name='vendor-detail'),
path('vendor/<int:vendor_pk>/documents/<int:file_pk>/', VendorPrivateMediaView.as_view(), name='vendor-documents'),
])),
path('payment-requests/', include(([
path('', PaymentRequestListView.as_view(), name='all'),
path('<int:pk>/', include([
path('', PaymentRequestView.as_view(), name='detail'),
path('edit/', EditPaymentRequestView.as_view(), name='edit'),
path('delete/', DeletePaymentRequestView.as_view(), name='delete'),
path('documents/invoice/', PaymentRequestPrivateMedia.as_view(), name="invoice"),
path('documents/receipt/<int:file_pk>/', PaymentRequestPrivateMedia.as_view(), name="receipt"),
])),
], 'payments'))),
path('invoices/', include(([
path('', InvoiceListView.as_view(), name='all'),
path('<int:pk>/', include([
path('', InvoiceView.as_view(), name='detail'),
path('edit/', EditInvoiceView.as_view(), name='edit'),
path('delete/', DeleteInvoiceView.as_view(), name='delete'),
path('documents/invoice/', InvoicePrivateMedia.as_view(), name="invoice-document"),
path('documents/supporting/<int:file_pk>/', InvoicePrivateMedia.as_view(), name="supporting-document"),
])),
], 'invoices'))),
path('reports/', include(([
path('', ReportListView.as_view(), name='all'),
path('<int:pk>/', include([
path('', ReportDetailView.as_view(), name='detail'),
path('skip/', ReportSkipView.as_view(), name='skip'),
path('edit/', ReportUpdateView.as_view(), name='edit'),
path('documents/<int:file_pk>/', ReportPrivateMedia.as_view(), name="document"),
])),
], 'reports'))),
]
|
1657950
|
import numpy as np
import torch
from torch.optim import Adam
import gym
import time
import yaml
import safe_rl.algos.cppo.core as core
from safe_rl.utils.logx import EpochLogger
from safe_rl.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from safe_rl.utils.mpi_tools import (mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar,
num_procs, mpi_sum)
from extra_envs.wrappers import Intervention
from extra_envs.intervener.base import Intervener
class CPPOBuffer:
"""
A buffer for storing trajectories experienced by a PPO agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95, scaling=1.,
normalize_adv=False):
self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
# Associated with task reward
self.adv_buf = np.zeros(size, dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.ret_buf = np.zeros(size, dtype=np.float32)
self.val_buf = np.zeros(size, dtype=np.float32)
# Associated with task cost
self.cadv_buf = np.zeros(size, dtype=np.float32)
self.cost_buf = np.zeros(size, dtype=np.float32)
self.cret_buf = np.zeros(size, dtype=np.float32)
self.cval_buf = np.zeros(size, dtype=np.float32)
        self.intv_buf = np.zeros(size, dtype=bool)
self.logp_buf = np.zeros(size, dtype=np.float32)
self.normalize_adv = normalize_adv
self.gamma, self.lam, self.scaling = gamma, lam, scaling
self.ptr, self.path_start_idx, self.max_size = 0, 0, size
def store(self, obs, act, rew, val, cost, cval, intv, logp):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
assert self.ptr < self.max_size # buffer has to have room so you can store
self.obs_buf[self.ptr] = obs
self.act_buf[self.ptr] = act
# Reward
self.rew_buf[self.ptr] = self.scaling*rew
self.val_buf[self.ptr] = val
# Cost
self.cost_buf[self.ptr] = self.scaling*cost
self.cval_buf[self.ptr] = cval
self.intv_buf[self.ptr] = intv
self.logp_buf[self.ptr] = logp
self.ptr += 1
def finish_path(self, last_val=0, last_cval=0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
"""
path_slice = slice(self.path_start_idx, self.ptr)
###########
# Rewards #
###########
rews = np.append((1-self.gamma)*self.rew_buf[path_slice], last_val)
vals = np.append(self.val_buf[path_slice], last_val)
# the next two lines implement GAE-Lambda advantage calculation
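        # GAE-Lambda on the (1 - gamma)-scaled rewards above:
        #   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t),  A_t = sum_l (gamma * lam)^l * delta_{t+l}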
deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)
# the next line computes rewards-to-go, to be targets for the value function
self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]
#########
# Costs #
#########
costs = np.append((1-self.gamma)*self.cost_buf[path_slice], last_cval)
cvals = np.append(self.cval_buf[path_slice], last_cval)
cdeltas = costs[:-1] + self.gamma*cvals[1:] - cvals[:-1]
self.cadv_buf[path_slice] = core.discount_cumsum(cdeltas, self.gamma*self.lam)
self.cret_buf[path_slice] = core.discount_cumsum(costs, self.gamma)[:-1]
self.path_start_idx = self.ptr
    def get(self, log_penalty=-np.inf):
"""
Call this at the end of an epoch to get all of the data from
the buffer, with advantages appropriately normalized (shifted to have
mean zero and std one). Also, resets some pointers in the buffer.
"""
assert self.ptr == self.max_size # buffer has to be full before you can get
self.ptr, self.path_start_idx = 0, 0
# the next two lines implement the advantage normalization trick
weight = 1/(1 + np.exp(-log_penalty))
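        # weight = sigmoid(log_penalty) lies in (0, 1) and trades off reward vs. cost
        # advantages in the Lagrangian-style objective combined below.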
lagrange_adv_buf = (1-weight)*self.adv_buf - weight*self.cadv_buf
adv_mean, adv_std = mpi_statistics_scalar(lagrange_adv_buf)
lagrange_adv_buf = lagrange_adv_buf - adv_mean
lagrange_adv_buf /= (adv_std if self.normalize_adv else self.scaling)
data = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf,
cret=self.cret_buf, adv=lagrange_adv_buf, logp=self.logp_buf)
out = {k: torch.as_tensor(v, dtype=torch.float32) for k,v in data.items()}
out.update(intv=self.intv_buf)
return out
def cppo(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), seed=0,
steps_per_epoch=4000, epochs=50, gamma=0.99, clip_ratio=0.2, pi_lr=3e-4,
vf_lr=1e-3, train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=1000,
target_kl=0.01, logger_kwargs=dict(), save_freq=10,
num_test_episodes=10, ent_bonus=0.001, scaling=100., dont_normalize_adv=False,
# Cost constraint/penalties
cost_lim=0.01, penalty=1., penalty_lr=5e-2, update_penalty_every=1,
optimize_penalty=False, ignore_unsafe_cost=False
):
"""
Proximal Policy Optimization (by clipping),
with early stopping based on approximate KL
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: The constructor method for a PyTorch Module with a
``step`` method, an ``act`` method, a ``pi`` module, and a ``v``
module. The ``step`` method should accept a batch of observations
and return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``a`` (batch, act_dim) | Numpy array of actions for each
| observation.
``v`` (batch,) | Numpy array of value estimates
| for the provided observations.
``logp_a`` (batch,) | Numpy array of log probs for the
| actions in ``a``.
=========== ================ ======================================
The ``act`` method behaves the same as ``step`` but only returns ``a``.
The ``pi`` module's forward call should accept a batch of
observations and optionally a batch of actions, and return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` N/A | Torch Distribution object, containing
| a batch of distributions describing
| the policy for the provided observations.
``logp_a`` (batch,) | Optional (only returned if batch of
| actions is given). Tensor containing
| the log probability, according to
| the policy, of the provided actions.
| If actions not given, will contain
| ``None``.
=========== ================ ======================================
The ``v`` module's forward call should accept a batch of observations
and return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``v`` (batch,) | Tensor containing the value estimates
| for the provided observations. (Critical:
| make sure to flatten this!)
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object
you provided to PPO.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs of interaction (equivalent to
number of policy updates) to perform.
gamma (float): Discount factor. (Always between 0 and 1.)
clip_ratio (float): Hyperparameter for clipping in the policy objective.
Roughly: how far can the new policy go from the old policy while
still profiting (improving the objective function)? The new policy
can still go farther than the clip_ratio says, but it doesn't help
on the objective anymore. (Usually small, 0.1 to 0.3.) Typically
denoted by :math:`\epsilon`.
pi_lr (float): Learning rate for policy optimizer.
vf_lr (float): Learning rate for value function optimizer.
train_pi_iters (int): Maximum number of gradient descent steps to take
on policy loss per epoch. (Early stopping may cause optimizer
to take fewer than this.)
train_v_iters (int): Number of gradient descent steps to take on
value function per epoch.
lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,
close to 1.)
max_ep_len (int): Maximum length of trajectory / episode / rollout.
target_kl (float): Roughly what KL divergence we think is appropriate
between new and old policies after an update. This will get used
for early stopping. (Usually small, 0.01 or 0.05.)
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
scaling (float): How much to scale the empirical returns to aid in learning the
value function
cost_lim (float): The tolerated cost limit
penalty (float): The initial penalty value
penalty_lr (float): The update size for the penalty
update_penalty_every (int): After how many policy updates we update the penalty
optimize_penalty (bool): Whether to optimize the penalty or keep it fixed
        ignore_unsafe_cost (bool): Whether to ignore the unsafe (violation) cost when
            computing the surrogate cost.
"""
# Special function to avoid certain slowdowns from PyTorch + MPI combo.
setup_pytorch_for_mpi()
# Set up logger and save configuration
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
# Random seed
seed += 10000 * proc_id()
torch.manual_seed(seed)
np.random.seed(seed)
# Instantiate environment
env = env_fn()
test_env = env.env
#test_env = gym.make('extra_envs:HalfCheetah-v0')
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape
rew_range = env.reward_range
v_range = (scaling*rew_range[0], scaling*rew_range[1])
vc_range = (0, scaling*1)
max_ep_len = min(max_ep_len, env.env._max_episode_steps)
# Create actor-critic module
ac = actor_critic(env.observation_space, env.action_space, v_range=v_range,
vc_range=vc_range, pred_std=True, **ac_kwargs)
# Sync params across processes
sync_params(ac)
# Count variables
var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v])
logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n' % var_counts)
# Set up experience buffer
local_steps_per_epoch = int(steps_per_epoch / num_procs())
buf = CPPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam, scaling,
normalize_adv=not dont_normalize_adv)
# Penalty learning
if optimize_penalty:
penalty_param_init = max(np.log(penalty), -100.)
penalty_param = torch.tensor([penalty_param_init], requires_grad=True,
dtype=torch.float32)
penalty_torch = torch.exp(penalty_param)
else:
penalty_torch = torch.tensor([penalty], dtype=torch.float32)
# Set up function for computing PPO policy loss
def compute_loss_pi(data):
obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']
intv = data['intv']
obs, act, adv, logp_old = [x[~intv] for x in [obs, act, adv, logp_old]]
# Policy loss
pi, logp = ac.pi(obs, act)
ratio = torch.exp(logp - logp_old)
clip_adv = torch.clamp(ratio, 1-clip_ratio, 1+clip_ratio) * adv
ent = pi.entropy().mean().item()
loss_pi = -(torch.min(ratio * adv, clip_adv) + ent_bonus*ent).mean()
# Useful extra info
approx_kl = (logp_old - logp).mean().item()
ent = pi.entropy().mean().item()
clipped = ratio.gt(1+clip_ratio) | ratio.lt(1-clip_ratio)
clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()
pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)
return loss_pi, pi_info
# Set up function for computing value loss
loss_fn = torch.nn.SmoothL1Loss()
def compute_loss_v(data):
obs, ret = data['obs'], data['ret']
return loss_fn(ac.v(obs), ret)
def compute_loss_vc(data):
obs, cret = data['obs'], data['cret']
return loss_fn(ac.vc(obs), cret)
def compute_loss_penalty(cost):
return -penalty_torch.squeeze()*(cost - cost_lim)
# Set up optimizers for policy and value function
pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)
vf_optimizer = Adam(ac.v.parameters(), lr=vf_lr)
vcf_optimizer = Adam(ac.vc.parameters(), lr=vf_lr)
if optimize_penalty:
penalty_optimizer = Adam([penalty_param], lr=penalty_lr)
# Set up model saving
logger.setup_pytorch_saver(ac)
def update(update_penalty):
curr_cost = logger.get_stats('EpCost')[0]
if curr_cost > cost_lim:
logger.log("Warning! Safety constraint violated.", 'red')
data = buf.get(np.log(penalty_torch.item()))
pi_l_old, pi_info_old = compute_loss_pi(data)
pi_l_old = pi_l_old.item()
v_l_old = compute_loss_v(data).item()
vc_l_old = compute_loss_vc(data).item()
# Train policy with multiple steps of gradient descent
for i in range(train_pi_iters):
pi_optimizer.zero_grad()
loss_pi, pi_info = compute_loss_pi(data)
kl = mpi_avg(pi_info['kl'])
if kl > 1.5 * target_kl:
logger.log('Early stopping at step %d due to reaching max kl.' % i)
break
loss_pi.backward()
mpi_avg_grads(ac.pi) # average grads across MPI processes
pi_optimizer.step()
logger.store(StopIter=i)
# Value function learning
for i in range(train_v_iters):
vf_optimizer.zero_grad()
loss_v = compute_loss_v(data)
loss_v.backward()
mpi_avg_grads(ac.v) # average grads across MPI processes
vf_optimizer.step()
vcf_optimizer.zero_grad()
loss_vc = compute_loss_vc(data)
loss_vc.backward()
mpi_avg_grads(ac.vc) # average grads across MPI processes
vcf_optimizer.step()
# Penalty update
if optimize_penalty and update_penalty:
penalty_optimizer.zero_grad()
loss_pen = compute_loss_penalty(curr_cost)
loss_pen.backward()
penalty_param_np = penalty_param.grad.numpy()
avg_grad = mpi_avg(penalty_param_np)
penalty_param.grad = torch.as_tensor(avg_grad)
penalty_optimizer.step()
# Log changes from update
kl, ent, cf = pi_info['kl'], pi_info_old['ent'], pi_info['cf']
logger.store(LossPi=pi_l_old, LossV=v_l_old, LossVC=vc_l_old,
KL=kl, Entropy=ent, ClipFrac=cf,
DeltaLossPi=(loss_pi.item() - pi_l_old),
DeltaLossV=(loss_v.item() - v_l_old),
DeltaLossVC=(loss_vc.item() - vc_l_old))
local_num_test_episodes = int(num_test_episodes / num_procs())
def test_agent():
for _ in range(local_num_test_episodes):
o, d, ep_ret, ep_cost, ep_len = test_env.reset(), False, 0, 0, 0
while not (d or ep_len == max_ep_len):
a = ac.act(torch.as_tensor(o, dtype=torch.float32), deterministic=True)
o, r, d, info = test_env.step(a)
ep_ret += r
ep_cost += info.get('cost', 0.)
ep_len += 1
logger.store(TestEpRet=ep_ret, TestEpCost=ep_cost, TestEpLen=ep_len)
# Prepare for interaction with environment
start_time = time.time()
o, ep_ret, ep_cost, ep_len, cum_cost, cum_viol = env.reset(), 0, 0, 0, 0, 0
ep_surr_cost, cum_surr_cost = 0, 0
already_intv, local_episodes = False, 1
# Main loop: collect experience in env and update/log each epoch
for epoch in range(epochs):
if optimize_penalty:
penalty_torch = torch.exp(penalty_param)
for t in range(local_steps_per_epoch):
a, v, vc, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))
next_o, r, d, info = env.step(a)
intervened = info.get('intervened', False)
if not intervened:
c = info.get('cost', 0.)
violation = (c == 1.)
ep_ret += r
ep_cost += c
ep_len += 1
cum_cost += c
cum_viol += violation
surr_c = (1-ignore_unsafe_cost)*c
ep_surr_cost += surr_c
cum_surr_cost += surr_c
buf.store(o, a, r, v, 0., vc, already_intv, logp)
elif env.intervener.mode == env.intervener.MODE.SAFE_ACTION:
next_o, r_safe, d, info_safe = info['safe_step_output']
c_safe = info_safe.get('cost', 0.)
violation = (c_safe == 1.)
ep_ret += r_safe
ep_cost += c_safe
ep_len += 1
cum_cost += c_safe
cum_viol += violation
ep_surr_cost += 1.
cum_surr_cost += 1.
buf.store(o, a, 0.*r, v, 1., vc, already_intv, logp)
elif env.intervener.mode == env.intervener.MODE.TERMINATE:
violation = False
ep_surr_cost += 1.
cum_surr_cost += 1.
buf.store(o, a, 0.*r, v, 1., vc, already_intv, logp)
else:
raise NotImplementedError
# store whether agent has been intervened in current episode
already_intv |= intervened
# save and log
logger.store(VVals=v, VcVals=vc)
# Update obs (critical!)
o = next_o
if intervened:
if env.intervener.mode == env.intervener.MODE.SAFE_ACTION:
while not (d or ep_len == max_ep_len):
_, _, _, info = env.step()
_, r_safe, d, info_safe = info['safe_step_output']
c_safe = info_safe.get('cost', 0.)
ep_ret += r_safe
ep_cost += c_safe
ep_len += 1
cum_cost += c_safe
elif env.intervener.mode == env.intervener.MODE.TERMINATE:
pass
else:
raise NotImplementedError
timeout = ep_len == max_ep_len
terminal = d or timeout
epoch_ended = (t == local_steps_per_epoch-1)
if terminal or epoch_ended:
if epoch_ended and not terminal:
print('Warning: trajectory cut off by epoch at %d steps.' % ep_len,
flush=True)
# if trajectory didn't reach terminal state, bootstrap value target
if intervened and env.intervener.mode in [Intervener.MODE.SAFE_ACTION,
Intervener.MODE.TERMINATE]:
v, vc = 0., vc_range[1]
elif violation:
v = 0.
vc = (1-ignore_unsafe_cost)*vc_range[1]
elif timeout or epoch_ended:
_, v, vc, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
else:
v = vc = 0
buf.finish_path(v, vc)
if terminal:
# only save EpRet / EpLen if trajectory finished
logger.store(EpRet=ep_ret, EpCost=ep_cost, EpLen=ep_len,
EpSurrCost=ep_surr_cost)
o, ep_ret, ep_cost, ep_len = env.reset(), 0, 0, 0
ep_surr_cost, already_intv = 0, False
local_episodes += 1
elif intervened and env.intervener.mode == Intervener.MODE.SAFE_ACTION:
buf.finish_path(0., vc_range[1])
# Save model
if (epoch % save_freq == 0) or (epoch == epochs-1):
logger.save_state({'env': env.env}, None)
# Perform PPO update!
update(update_penalty_every > 0 and (epoch+1) % update_penalty_every == 0)
# Cumulative cost calculations
cumulative_cost = mpi_sum(cum_cost)
cumulative_surr_cost = mpi_sum(cum_surr_cost)
cumulative_violations = mpi_sum(cum_viol)
episodes = mpi_sum(local_episodes)
cost_rate = cumulative_cost / episodes
surr_cost_rate = cumulative_surr_cost / episodes
viol_rate = cumulative_violations / episodes
# Test the performance of the deterministic version of the agent.
test_agent()
o = env.reset()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
# Performance
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpCost', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
# Cost
logger.log_tabular('CumulativeCost', cumulative_cost)
logger.log_tabular('LogCumulativeCost', np.log10(cumulative_cost))
logger.log_tabular('CostRate', cost_rate)
logger.log_tabular('EpSurrCost', with_min_and_max=True)
logger.log_tabular('CumulativeSurrCost', cumulative_surr_cost)
logger.log_tabular('LogCumulativeSurrCost', np.log10(cumulative_surr_cost))
logger.log_tabular('SurrCostRate', surr_cost_rate)
logger.log_tabular('CumulativeViolations', cumulative_violations)
logger.log_tabular('LogCumulativeViolations', np.log10(cumulative_violations))
logger.log_tabular('ViolationRate', viol_rate)
# Test performance
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('TestEpCost', with_min_and_max=True)
logger.log_tabular('TestEpLen', average_only=True)
# Penalty
logger.log_tabular('Penalty', float(penalty_torch.item()))
logger.log_tabular('LogPenalty', np.log10(float(penalty_torch.item())))
# Value function stats
logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('VcVals', with_min_and_max=True)
# Policy loss and change
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('DeltaLossPi', average_only=True)
# Value loss and change
logger.log_tabular('LossV', average_only=True)
logger.log_tabular('LossVC', average_only=True)
logger.log_tabular('DeltaLossV', average_only=True)
logger.log_tabular('DeltaLossVC', average_only=True)
# Policy stats
logger.log_tabular('Entropy', average_only=True)
logger.log_tabular('KL', average_only=True)
# PPO stats
logger.log_tabular('ClipFrac', average_only=True)
logger.log_tabular('StopIter', average_only=True)
# Time and steps elapsed
logger.log_tabular('Time', time.time()-start_time)
logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)
logger.dump_tabular()
|
1657951
|
import torch
from torch import nn
from torch.nn import functional as F
import torchvision
if __package__:
pass
else:
import sys
sys.path.insert(0, '..')
__all__ = ['ResNet50TP', 'ResNet50TA', 'ResNet50RNN']
class ResNet50TP(nn.Module):
def __init__(self, num_classes, loss={'xent'}, **kwargs):
super(ResNet50TP, self).__init__()
self.loss = loss
resnet50 = torchvision.models.resnet50(pretrained=True)
self.base = nn.Sequential(*list(resnet50.children())[:-2])
self.feat_dim = 2048
self.classifier = nn.Linear(self.feat_dim, num_classes)
def forward(self, x):
b = x.size(0)
t = x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
x = self.base(x)
x = F.avg_pool2d(x, x.size()[2:])
x = x.view(b, t, -1)
x = x.permute(0, 2, 1)
f = F.avg_pool1d(x, t)
f = f.view(b, self.feat_dim)
if not self.training:
return f
y = self.classifier(f)
if self.loss == {'xent'}:
return y
elif self.loss == {'xent', 'htri'}:
return y, f
elif self.loss == {'cent'}:
return y, f
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50TA(nn.Module):
def __init__(self, num_classes, loss={'xent'}, **kwargs):
super(ResNet50TA, self).__init__()
self.loss = loss
resnet50 = torchvision.models.resnet50(pretrained=True)
self.base = nn.Sequential(*list(resnet50.children())[:-2])
self.att_gen = 'softmax' # method for attention generation: softmax or sigmoid
self.feat_dim = 2048 # feature dimension
self.middle_dim = 256 # middle layer dimension
self.classifier = nn.Linear(self.feat_dim, num_classes)
        # 7,4 corresponds to 224, 112 input image size
self.attention_conv = nn.Conv2d(self.feat_dim, self.middle_dim, [7, 4])
self.attention_tconv = nn.Conv1d(self.middle_dim, 1, 3, padding=1)
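        # attention_conv collapses each frame's 7x4 spatial feature map into a
        # middle_dim vector; attention_tconv then scores frames over time, and the
        # (softmax/sigmoid) scores weight the frame features before pooling in forward().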
def forward(self, x):
b = x.size(0)
t = x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
x = self.base(x)
a = F.relu(self.attention_conv(x))
a = a.view(b, t, self.middle_dim)
a = a.permute(0, 2, 1)
a = F.relu(self.attention_tconv(a))
a = a.view(b, t)
x = F.avg_pool2d(x, x.size()[2:])
if self.att_gen == 'softmax':
a = F.softmax(a, dim=1)
elif self.att_gen == 'sigmoid':
a = F.sigmoid(a)
a = F.normalize(a, p=1, dim=1)
else:
raise KeyError("Unsupported attention generation function: {}".format(self.att_gen))
x = x.view(b, t, -1)
a = torch.unsqueeze(a, -1)
a = a.expand(b, t, self.feat_dim)
att_x = torch.mul(x, a)
att_x = torch.sum(att_x, 1)
f = att_x.view(b, self.feat_dim)
if not self.training:
return f
y = self.classifier(f)
if self.loss == {'xent'}:
return y
elif self.loss == {'xent', 'htri'}:
return y, f
elif self.loss == {'cent'}:
return y, f
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
class ResNet50RNN(nn.Module):
def __init__(self, num_classes, loss={'xent'}, **kwargs):
super(ResNet50RNN, self).__init__()
self.loss = loss
resnet50 = torchvision.models.resnet50(pretrained=True)
self.base = nn.Sequential(*list(resnet50.children())[:-2])
self.hidden_dim = 512
self.feat_dim = 2048
self.classifier = nn.Linear(self.hidden_dim, num_classes)
self.lstm = nn.LSTM(input_size=self.feat_dim, hidden_size=self.hidden_dim, num_layers=1, batch_first=True)
def forward(self, x):
b = x.size(0)
t = x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
x = self.base(x)
x = F.avg_pool2d(x, x.size()[2:])
x = x.view(b, t, -1)
output, (h_n, c_n) = self.lstm(x)
output = output.permute(0, 2, 1)
f = F.avg_pool1d(output, t)
f = f.view(b, self.hidden_dim)
if not self.training:
return f
y = self.classifier(f)
if self.loss == {'xent'}:
return y
elif self.loss == {'xent', 'htri'}:
return y, f
elif self.loss == {'cent'}:
return y, f
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
|
1657954
|
from ._version import __version__, __title__
from ._types import ProxyType
from ._helpers import parse_proxy_url
from ._errors import (
ProxyError,
ProxyTimeoutError,
ProxyConnectionError,
)
__all__ = (
'__title__',
'__version__',
'ProxyError',
'ProxyTimeoutError',
'ProxyConnectionError',
'ProxyType',
'parse_proxy_url',
)
|
1657994
|
from metrics import *
import random
from Load_npz import load_npz_data2, load_npz_data_ood_train2
from scipy.special import loggamma, digamma
from utils import load_data_threshold, load_data_ood
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
def vacuity_uncertainty(Baye_result):
# Vacuity uncertainty
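    # Subjective-logic vacuity: with Dirichlet strength S = sum(alpha) over K classes,
    # u = K / S, so vacuity is large when little total evidence has been observed.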
mean = np.mean(Baye_result, axis=0)
class_num = mean.shape[1]
alpha = np.exp(mean) + 1.0
S = np.sum(alpha, axis=1, keepdims=True)
un_vacuity = class_num / S
return un_vacuity
def vacuity_sgcn(mean):
# Vacuity uncertainty
class_num = mean.shape[1]
alpha = np.exp(mean) + 1.0
S = np.sum(alpha, axis=1, keepdims=True)
un_vacuity = class_num / S
return un_vacuity
def dissonance_uncertainty(Baye_result):
mean = np.mean(Baye_result, axis=0)
evidence = np.exp(mean)
alpha = np.exp(mean) + 1.0
S = np.sum(alpha, axis=1, keepdims=True)
belief = evidence / S
dis_un = np.zeros_like(S)
for k in range(belief.shape[0]):
for i in range(belief.shape[1]):
bi = belief[k][i]
term_Bal = 0.0
term_bj = 0.0
for j in range(belief.shape[1]):
if j != i:
bj = belief[k][j]
term_Bal += bj * Bal(bi, bj)
term_bj += bj
dis_ki = bi * term_Bal / term_bj
dis_un[k] += dis_ki
return dis_un
def dissonance_sgcn(mean):
evidence = np.exp(mean)
alpha = np.exp(mean) + 1.0
S = np.sum(alpha, axis=1, keepdims=True)
belief = evidence / S
dis_un = np.zeros_like(S)
for k in range(belief.shape[0]):
for i in range(belief.shape[1]):
bi = belief[k][i]
term_Bal = 0.0
term_bj = 0.0
for j in range(belief.shape[1]):
if j != i:
bj = belief[k][j]
term_Bal += bj * Bal(bi, bj)
term_bj += bj
dis_ki = bi * term_Bal / term_bj
dis_un[k] += dis_ki
return dis_un
def Bal(b_i, b_j):
result = 1 - np.abs(b_i - b_j) / (b_i + b_j)
return result
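# Bal(b_i, b_j) = 1 - |b_i - b_j| / (b_i + b_j) measures how balanced two beliefs are:
# it is 1 when they are equal and 0 when one of them vanishes, so dissonance peaks for
# evenly spread, conflicting evidence.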
def entropy_SL(mean):
class_num = mean.shape[1]
alpha = np.exp(mean) + 1.0
S = np.sum(alpha, axis=1, keepdims=True)
prob = alpha / S
entropy = - prob * (np.log(prob) / np.log(class_num))
total_un = np.sum(entropy, axis=1, keepdims=True)
class_un = entropy
return total_un, class_un
def entropy(pred):
class_num = pred.shape[1]
prob = pred
entropy = - prob * (np.log(prob) / np.log(class_num))
total_un = np.sum(entropy, axis=1, keepdims=True)
class_un = entropy
return total_un, class_un
def entropy_softmax(pred):
class_num = pred.shape[1]
prob = softmax(pred)
entropy = - prob * (np.log(prob) / np.log(class_num))
total_un = np.sum(entropy, axis=1, keepdims=True)
class_un = entropy
return total_un, class_un
def entropy_dropout(pred):
mean = []
for p in pred:
prob_i = softmax(p)
mean.append(prob_i)
mean = np.mean(mean, axis=0)
class_num = mean.shape[1]
prob = mean
entropy = - prob * (np.log(prob) / np.log(class_num))
total_un = np.sum(entropy, axis=1, keepdims=True)
class_un = entropy
return total_un, class_un
def aleatoric_dropout(Baye_result):
al_un = []
al_class_un = []
for item in Baye_result:
un, class_un = entropy_softmax(item)
al_un.append(un)
al_class_un.append(class_un)
ale_un = np.mean(al_un, axis=0)
ale_class_un = np.mean(al_class_un, axis=0)
return ale_un, ale_class_un
def softmax(pred):
ex = np.exp(pred - np.amax(pred, axis=1, keepdims=True))
prob = ex / np.sum(ex, axis=1, keepdims=True)
return prob
def total_uncertainty(Baye_result):
prob_all = []
class_num = Baye_result[0].shape[1]
for item in Baye_result:
alpha = np.exp(item) + 1.0
S = np.sum(alpha, axis=1, keepdims=True)
prob = alpha / S
prob_all.append(prob)
prob_mean = np.mean(prob_all, axis=0)
total_class_un = - prob_mean * (np.log(prob_mean) / np.log(class_num))
total_un = np.sum(total_class_un, axis=1, keepdims=True)
return total_un, total_class_un
def aleatoric_uncertainty(Baye_result):
al_un = []
al_class_un = []
for item in Baye_result:
un, class_un = entropy_SL(item)
al_un.append(un)
al_class_un.append(class_un)
ale_un = np.mean(al_un, axis=0)
ale_class_un = np.mean(al_class_un, axis=0)
return ale_un, ale_class_un
def dpn_epistemic(result):
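    # Distributional (epistemic) uncertainty of a Dirichlet Prior Network: the mutual
    # information between the label and the categorical parameters,
    #   I = -sum_k p_k * (ln p_k - digamma(alpha_k + 1) + digamma(S + 1)),  p_k = alpha_k / S.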
alpha = np.exp(result) + 1.0
S = np.sum(alpha, axis=1, keepdims=True)
p = alpha/S
term1 = np.log(p) - digamma(alpha + 1) + digamma(S + 1)
un = p * term1
un = -np.sum(un, axis=1, keepdims=True)
return un
def get_un_dpn(result):
epi = dpn_epistemic(result)
ent, _ = entropy_SL(result)
alea = ent - epi
uncertainty = []
uncertainty.append(alea)
uncertainty.append(epi)
uncertainty.append(ent)
return uncertainty
def get_un_sgcn(result):
ent, _ = entropy_SL(result)
vac = vacuity_sgcn(result)
diss = dissonance_sgcn(result)
uncertainty = []
uncertainty.append(vac)
uncertainty.append(diss)
uncertainty.append(ent)
return uncertainty
def get_un_EDL(result):
ent, _ = entropy_SL(result)
vac = vacuity_sgcn(result)
diss = dissonance_sgcn(result)
uncertainty = []
uncertainty.append(vac)
uncertainty.append(diss)
uncertainty.append(ent)
return uncertainty
def get_uncertainty(Baye_result):
uncertainty = []
uncertainty_class = []
un_vacuity = vacuity_uncertainty(Baye_result)
un_dissonance = dissonance_uncertainty(Baye_result)
un_total, un_total_class = total_uncertainty(Baye_result)
un_aleatoric, un_aleatoric_class = aleatoric_uncertainty(Baye_result)
# un_epistemic = un_total - un_aleatoric
un_epistemic_class = un_total_class - un_aleatoric_class
un_epistemic = np.sum(un_epistemic_class, axis=1, keepdims=True)
un_var3_class = np.var(Baye_result, axis=0)
un_var3 = np.sum(un_var3_class, axis=1, keepdims=True)
diff_en = diff_entropy(Baye_result)
uncertainty.append(un_vacuity)
uncertainty.append(un_dissonance)
uncertainty.append(un_aleatoric)
uncertainty.append(un_epistemic)
# uncertainty.append(diff_en)
uncertainty.append(un_total)
# uncertainty.append(un_ep_var)
# uncertainty.append(un_ep_var2)
# uncertainty.append(un_var3)
uncertainty_class.append(un_aleatoric_class)
uncertainty_class.append(un_epistemic_class)
uncertainty_class.append(un_total_class)
return uncertainty
def diff_entropy(Baye_result):
mean = np.mean(Baye_result, axis=0)
alpha = np.exp(mean) + 1.0
S = np.sum(alpha, axis=1, keepdims=True)
ln_gamma = loggamma(alpha)
ln_gamma_S = loggamma(S)
term1 = np.sum(ln_gamma, axis=1, keepdims=True) - ln_gamma_S
digmma_alpha = digamma(alpha)
digamma_S = digamma(S)
term2 = (alpha - 1) * (digmma_alpha - digamma_S)
term2 = np.sum(term2, axis=1, keepdims=True)
diff_en = term1 - term2
return diff_en
def get_un_dropout(pred):
un = []
dr_entroy, dr_entroy_class = entropy_dropout(pred)
dr_ale, dr_ale_clsss = aleatoric_dropout(pred)
dr_eps_class = dr_entroy_class - dr_ale_clsss
dr_eps = np.sum(dr_eps_class, axis=1, keepdims=True)
un.append(dr_entroy)
un.append(dr_ale)
un.append(dr_eps)
return un
def get_un_entropy(pred):
un = []
dr_entroy, dr_entroy_class = entropy_softmax(pred)
un.append(dr_entroy)
return un
def Misclassification_npz(output, dataset, model): ## table 2
_, _, _, _, y_test, train_mask, _, test_mask, labels, test_idx = load_npz_data2(dataset, 223)
if model == "S_GCN":
uncertainties = get_un_sgcn(output)
mean = output
elif model == "S_BGCN_T" or model == "S_BGCN_T_K" or model == "S_BGCN":
uncertainties = get_uncertainty(output)
mean = np.mean(output, axis=0)
elif model == "DPN":
uncertainties = get_un_dpn(output)
mean = output
elif model == "EDL":
uncertainties = get_un_EDL(output)
mean = output
elif model == "Drop":
uncertainties = get_un_dropout(output)
mean = np.mean(output, axis=0)
elif model == "GCN":
uncertainties = get_un_entropy(output)
mean = output
prediction = np.equal(np.argmax(mean, 1), np.argmax(labels, 1))
auroc_s = []
aupr_s = []
random.seed(123)
test_index = []
test_idx = list(test_idx)
for i in range(10):
test_index_i = random.sample(test_idx, 1000)
test_index.append(test_index_i)
for index in test_index:
prediction_i = prediction[index]
un_roc = []
un_pr = []
for uncertainty in uncertainties:
un_i = uncertainty[index]
un_roc.append(roc_auc_score(prediction_i, -np.array(un_i)))
un_pr.append(average_precision_score(prediction_i, -np.array(un_i)))
auroc_s.append(un_roc)
aupr_s.append(un_pr)
return np.mean(auroc_s, axis=0), np.mean(aupr_s, axis=0)
def Misclassification(output, dataset, model): ## table 2
_, _, _, _, y_test, train_mask, _, test_mask, labels = load_data_threshold(dataset)
if model == "S_GCN":
uncertainties = get_un_sgcn(output)
mean = output
elif model == "S_BGCN_T" or model == "S_BGCN_T_K" or model == "S_BGCN":
uncertainties = get_uncertainty(output)
mean = np.mean(output, axis=0)
elif model == "DPN":
uncertainties = get_un_dpn(output)
mean = output
elif model == "EDL":
uncertainties = get_un_EDL(output)
mean = output
elif model == "Drop":
uncertainties = get_un_dropout(output)
mean = np.mean(output, axis=0)
elif model == "GCN":
uncertainties = get_un_entropy(output)
mean = output
prediction = np.equal(np.argmax(mean, 1), np.argmax(labels, 1))
train_num = np.sum(train_mask)
test_index = []
auroc_s = []
aupr_s = []
for i in range(10):
test_index_i = random.sample(range(int(train_num), len(test_mask)), 1000)
test_index.append(test_index_i)
for index in test_index:
prediction_i = prediction[index]
un_roc = []
un_pr = []
for uncertainty in uncertainties:
un_i = uncertainty[index]
un_roc.append(roc_auc_score(prediction_i, -np.array(un_i)))
un_pr.append(average_precision_score(prediction_i, -np.array(un_i)))
auroc_s.append(un_roc)
aupr_s.append(un_pr)
return np.mean(auroc_s, axis=0), np.mean(aupr_s, axis=0)
def OOD_Detection_npz(output, dataset, model): ## table 3
_, _, _, _, _, _, test_mask, idx_train = load_npz_data_ood_train2(dataset, 223)
if model == "S_GCN":
uncertainties = get_un_sgcn(output)
elif model == "S_BGCN_T" or model == "S_BGCN_T_K" or model == "S_BGCN":
uncertainties = get_uncertainty(output)
elif model == "DPN":
uncertainties = get_un_dpn(output)
elif model == "EDL":
uncertainties = get_un_EDL(output)
elif model == "Drop":
uncertainties = get_un_dropout(output)
elif model == "GCN":
uncertainties = get_un_entropy(output)
prediction = test_mask
auroc_s = []
aupr_s = []
random.seed(123)
test_idx = list(range(len(test_mask)))
for x in idx_train:
test_idx.remove(x)
test_index = []
for i in range(10):
test_index_i = random.sample(test_idx, 1000)
test_index.append(test_index_i)
for index in test_index:
prediction_i = prediction[index]
un_roc = []
un_pr = []
for uncertainty in uncertainties:
un_i = uncertainty[index]
un_roc.append(roc_auc_score(prediction_i, np.array(un_i)))
un_pr.append(average_precision_score(prediction_i, np.array(un_i)))
auroc_s.append(un_roc)
aupr_s.append(un_pr)
return np.mean(auroc_s, axis=0), np.mean(aupr_s, axis=0)
def OOD_Detection(output, dataset, model): ## table 3
_, _, _, _, _, _, test_mask_all, test_mask = load_data_ood(dataset)
if model == "S_GCN":
uncertainties = get_un_sgcn(output)
elif model == "S_BGCN_T" or model == "S_BGCN_T_K" or model == "S_BGCN":
uncertainties = get_uncertainty(output)
elif model == "DPN":
uncertainties = get_un_dpn(output)
elif model == "EDL":
uncertainties = get_un_EDL(output)
elif model == "Drop":
uncertainties = get_un_dropout(output)
elif model == "GCN":
uncertainties = get_un_entropy(output)
test_num_all = np.sum(test_mask_all)
prediction = test_mask
train_num = len(test_mask_all) - test_num_all
test_index = []
auroc_s = []
aupr_s = []
for i in range(10):
test_index_i = random.sample(range(int(train_num), len(test_mask)), 1000)
test_index.append(test_index_i)
for index in test_index:
prediction_i = prediction[index]
un_roc = []
un_pr = []
for uncertainty in uncertainties:
un_i = uncertainty[index]
un_roc.append(roc_auc_score(prediction_i, np.array(un_i)))
un_pr.append(average_precision_score(prediction_i, np.array(un_i)))
auroc_s.append(un_roc)
aupr_s.append(un_pr)
return np.mean(auroc_s, axis=0), np.mean(aupr_s, axis=0)
|
1658000
|
from django.conf import settings
from django.utils import timezone
class TimezoneMiddleware(object):
def process_request(self, request):
if request.user.is_authenticated():
tz = request.session.get('django_timezone',
default=request.user.profile.time_zone) or settings.TIME_ZONE
timezone.activate(tz)
else:
timezone.deactivate()
|
1658026
|
import json
import unittest
import os
from polylabel import polylabel
cd = os.path.join(os.path.abspath(os.path.dirname(__file__)))
class PolyLabelTestCase(unittest.TestCase):
def test_short(self):
with open(cd + "/fixtures/short.json", "r") as f:
short = json.load(f)
self.assertEqual(polylabel(short), [3317.546875, 1330.796875])
def test_distance(self):
with open(cd + "/fixtures/short.json", "r") as f:
short = json.load(f)
self.assertEqual(polylabel(short, with_distance=True), ([3317.546875, 1330.796875], 5.4406249999999545))
    def test_water1(self):
with open(cd + "/fixtures/water1.json", "r") as f:
water1 = json.load(f)
self.assertEqual(polylabel(water1), [
3865.85009765625, 2124.87841796875])
self.assertEqual(polylabel(water1, 50), [3854.296875, 2123.828125])
def test_float(self):
with open(cd + "/fixtures/float.json", "r") as f:
float_poly = json.load(f)
self.assertEqual(polylabel(float_poly),
[-23.210525613080737, 24.425270860193958])
def test_works_on_degenerate_polygons(self):
out = polylabel([[[0, 0], [1, 0], [2, 0], [0, 0]]])
self.assertEqual(out, [0, 0])
out = polylabel([[[0, 0], [1, 0], [1, 1], [1, 0], [0, 0]]])
self.assertEqual(out, [0, 0])
    def test_water2(self):
with open(cd + "/fixtures/water2.json", "r") as f:
water2 = json.load(f)
self.assertEqual(polylabel(water2, 1), [3263.5, 3263.5])
def test_issue_no5(self):
self.assertEqual(polylabel([[[100, 0], [105, 0], [110, 10], [100, 1], [100, 0]]]), [103.125, 1.875])
if __name__ == '__main__':
unittest.main()
|
1658029
|
from .base import *
from .bert import *
from .kim_cnn import *
from .conv_rnn import *
from .bi_rnn import *
from .siamese_rnn import *
|
1658088
|
from copy import deepcopy
from graph import Graph
from collection.priority_queue import PriorityQueue
def kruskals(adj_list):
'''Return a minimum spanning tree of adj_list using Kruskal's algo.'''
adj_list = deepcopy(adj_list) # Since we need to modify adj_list
expected_mst_edges = (len(adj_list.keys()) - 1) * 2
mst = {k: [] for k in adj_list}
for node in adj_list:
adj_list[node].sort(key=lambda x: x[1]) # Sort by weight edge
while get_edge_num(mst) < expected_mst_edges:
min_edge = get_min_weight_edge(adj_list)
first_node = min_edge[0]
sec_node = min_edge[1]
weight = min_edge[2]
if not does_create_cycle(mst, min_edge): # Then include edge in MST
edge_first_node = (sec_node, weight)
edge_sec_node = (first_node, weight)
mst[first_node].append(edge_first_node)
mst[sec_node].append(edge_sec_node)
del adj_list[first_node][0]
del adj_list[sec_node][0]
return mst
def get_min_weight_edge(adj_list):
'''Return the edge with minimum weight in adj_list.'''
min_edge = None
for key in adj_list:
if adj_list[key]: # If this node has edges
cur_edge = [key] + list(adj_list[key][0])
if min_edge is None:
min_edge = [key] + list(cur_edge)
min_edge = min(min_edge, cur_edge, key=lambda x: x[2])
return min_edge
def does_create_cycle(adj_list, edge):
'''Return whether edge creates a cycle in adj_list.'''
return is_connected(adj_list, edge[0]) and is_connected(adj_list, edge[1])
def is_connected(adj_list, node):
''' Return whether there is an edge in adj_list connected to node.'''
return any(node in adj_list[key] for key in adj_list)
def get_edge_num(adj_list):
''' Return the total number of edges in the graph (adj_list).'''
return sum((len(v) for k, v in adj_list.iteritems()))
def prims(adj_list):
'''Return a minimum spanning tree of adj_list using Prim's algo.'''
adj_list = deepcopy(adj_list) # Since we need to modify adj_list
expected_mst_edges = (len(adj_list.keys()) - 1) * 2
mst = {adj_list.iterkeys().next(): []} # Choose any one node
for node in adj_list:
adj_list[node].sort(key=lambda x: x[1]) # Sort by weight edge
while get_edge_num(mst) < expected_mst_edges:
copy = {}
for node in mst: # Get the minimum edge among nodes in mst
copy[node] = adj_list[node]
min_edge = get_min_weight_edge(copy)
first_node = min_edge[0]
sec_node = min_edge[1]
weight = min_edge[2]
if not does_create_cycle(mst, min_edge): # Then include edge in MST
edge_first_node = (sec_node, weight)
edge_sec_node = (first_node, weight)
mst[first_node] = mst.get(first_node, []) + [(edge_first_node)]
mst[sec_node] = mst.get(sec_node, []) + [(edge_sec_node)]
adj_list[first_node].remove((sec_node, weight))
adj_list[sec_node].remove((first_node, weight))
return mst
def a_star(start, end, coords):
'''Return the shortest route from start coord to end coord using A*
algorithm'''
coords = deepcopy(coords) # Since we have to modify the coordinates
start = deepcopy(start)
start["g_score"], start["h_score"], start["f_score"] = 0, 0, 0
start["prev"] = []
visited = []
to_discover = [start]
while to_discover:
cur_node = get_min_fscore_coord(to_discover)
to_discover.remove(cur_node)
is_end = cur_node["is_end"]
x, y = cur_node['x'], cur_node['y']
if is_end: # Found finishing node
return cur_node["prev"] + [(x, y)]
to_discover += get_valid_neighbors(cur_node, end, coords, visited)
visited += [(x, y)]
return None
def get_min_fscore_coord(to_discover):
'''Return the coord with minimum f score in list to_discover '''
f_score = min(coord['f_score'] for coord in to_discover)
for coord in to_discover:
if coord['f_score'] == f_score:
return coord
return None
def get_valid_neighbors(cur_node, end, coords, visited):
''' Return list of valid neighbors for cur_node.'''
x, y = cur_node['x'], cur_node['y']
is_wall = cur_node["is_wall"]
valid_neighbors = []
to_check = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
for c in to_check:
if is_in_board_and_not_visited(c, coords, visited):
coord = get_coord(c, coords)
coord['prev'] = cur_node['prev'] + [(x, y)]
update_scores(coord, cur_node, end)
valid_neighbors += [coord]
return valid_neighbors
def is_in_board_and_not_visited(coord, coords, visited):
'''Return if coord is inside the board, not a wall, and was not visited.'''
viable_coords = [(c['x'], c['y']) for c in coords if not c["is_wall"]]
return coord in viable_coords and coord not in visited
def get_coord(c, coordinates):
'''Return the dictionary coord for tuple c inside coordinates.'''
for coord in coordinates:
if coord['x'] == c[0] and coord['y'] == c[1]:
return coord
return None
def calculate_hscore(cur_node, end_node):
'''Return the h score of cur_node.'''
x_dist = abs(cur_node['x'] - end_node['x'])
y_dist = abs(cur_node['y'] - end_node['y'])
return x_dist + y_dist
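# The Manhattan distance above is an admissible heuristic here: moves are restricted to
# the four grid neighbours (see get_valid_neighbors) and every step costs 1.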
def update_scores(coord, visiting_coord, end_node):
'''Update h score for coord.'''
coord['g_score'] = visiting_coord['g_score'] + 1
coord['h_score'] = calculate_hscore(coord, end_node)
coord['f_score'] = coord['g_score'] + coord['h_score']
def topological_sort(prereq_task):
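    # Kahn's algorithm: prereq_task maps a task to the tasks that depend on it; we
    # invert that mapping, seed the queue with tasks that have no prerequisites, and
    # emit a task once all of its prerequisites have been emitted.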
def get_task_prereq(prereq_task):
task_prereq = {c: [] for c in prereq_task}
for prereq, rely_prereq in prereq_task.iteritems():
for task in rely_prereq:
task_prereq[task].append(prereq)
return task_prereq
task_prereq = get_task_prereq(prereq_task)
todo = [c for c, deps in task_prereq.iteritems() if not deps]
order = []
while todo:
prereq = todo.pop()
order.append(prereq)
for t in prereq_task[prereq]: # These tasks rely on pre-req
task_prereq[t].remove(prereq)
if not task_prereq[t]:
todo.append(t)
return order
def shortest_paths(graph, start):
# Dijkstra's algorithm for single source shortest paths
# O(|V| log |V| + |E| log |V|)
paths = {node: None for node in graph}
distances = {node: float('inf') for node in graph}
distances[start] = 0
pq = PriorityQueue()
for node in graph:
if node == start:
pq.put(node, 0)
else:
pq.put(node, float('inf'))
while not pq.is_empty():
node, distance = pq.get()
for neighbour, edge_cost in graph[node]:
new_cost = distance + edge_cost
if new_cost < distances[neighbour]:
distances[neighbour] = new_cost
paths[neighbour] = node
pq.update(neighbour, new_cost)
return paths, distances
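# `shortest_paths` above relies on a `PriorityQueue` that is assumed to be
# defined or imported elsewhere in this module. The sketch below is only a
# reference implementation of the interface it uses (put / update / get /
# is_empty), built on a lazy-deletion binary heap; the name and details are
# illustrative, not the project's actual class.
import heapq
class ReferencePriorityQueue(object):
    def __init__(self):
        self._heap = []     # (priority, item) entries, possibly stale
        self._latest = {}   # item -> most recently assigned priority
    def put(self, item, priority):
        self._latest[item] = priority
        heapq.heappush(self._heap, (priority, item))
    def update(self, item, priority):
        # Re-push with the new priority; stale entries are skipped in get().
        self.put(item, priority)
    def is_empty(self):
        return not self._latest
    def get(self):
        # Pop until an entry whose priority is still current is found.
        while self._heap:
            priority, item = heapq.heappop(self._heap)
            if self._latest.get(item) == priority:
                del self._latest[item]
                return item, priority
        raise KeyError('get() from an empty priority queue')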
|
1658116
|
import math
from procgame import game,dmd
import ep
class InitialEntryMode(game.Mode):
"""Mode that prompts the player for their initials.
*left_text* and *right_text* are strings or arrays to be displayed at the
left and right corners of the display. If they are arrays they will be
rotated.
:attr:`entered_handler` is called once the initials have been confirmed.
This mode does not remove itself; this should be done in *entered_handler*."""
entered_handler = None
"""Method taking two parameters: `mode` and `inits`."""
char_back = '{'
char_done = '}'
init_font = None
font = None
letters_font = None
def __init__(self, game, priority, left_text, right_text, entered_handler,max_inits,extended=False):
super(InitialEntryMode, self).__init__(game, priority)
self.entered_handler = entered_handler
self.init_font = self.game.assets.font_09Bx7
self.font = self.game.assets.font_07x5
self.letters_font = self.game.assets.font_07x5
self.layer = dmd.GroupedLayer(128, 32)
self.layer.opaque = True
self.layer.layers = []
self.knocks = 0
self.max_inits = max_inits
self.extended = extended
if type(right_text) != list:
right_text = [right_text]
if type(left_text) != list:
left_text = [left_text,"MAX " + str(self.max_inits)]
seconds_per_text = 1.5
script = []
for text in left_text:
frame = dmd.Frame(width=128, height=8)
self.font.draw(frame, text, 0, 0,color=ep.YELLOW)
script.append({'seconds':seconds_per_text, 'layer':dmd.FrameLayer(frame=frame)})
topthird_left_layer = dmd.ScriptedLayer(width=128, height=8, script=script)
topthird_left_layer.composite_op = 'blacksrc'
self.layer.layers += [topthird_left_layer]
script = []
for text in right_text:
frame = dmd.Frame(width=128, height=8)
self.font.draw(frame, text, 128-(self.font.size(text)[0]), 0,color=ep.ORANGE)
script.append({'seconds':seconds_per_text, 'layer':dmd.FrameLayer(frame=frame)})
if text == "Grand Champion":
self.knocks += 2
elif text == 'High Score #1' or \
text == 'High Score #2' or \
text == 'High Score #3' or \
text == 'High Score #4':
self.knocks += 1
topthird_right_layer = dmd.ScriptedLayer(width=128, height=8, script=script)
topthird_right_layer.composite_op = 'blacksrc'
self.layer.layers += [topthird_right_layer]
self.inits_frame = dmd.Frame(width=128, height=10)
inits_layer = dmd.FrameLayer(opaque=False, frame=self.inits_frame)
inits_layer.set_target_position(0, 11)
self.layer.layers += [inits_layer]
self.lowerhalf_layer = dmd.FrameQueueLayer(opaque=False, hold=True)
self.lowerhalf_layer.set_target_position(0, 24)
self.layer.layers += [self.lowerhalf_layer]
self.letters = []
for idx in range(26):
self.letters += [chr(ord('A')+idx)]
self.letters += [' ', '.']
if self.extended:
self.letters += ['0','1','2','3','4','5','6','7','8','9','(',')','@','*','&','<','>','=','^','/','-','+','!','$','"',"'"]
self.letters += [self.char_back, self.char_done]
self.current_letter_index = 0
self.inits = self.letters[self.current_letter_index]
self.animate_to_index(0)
def mode_started(self):
pass
def mode_stopped(self):
pass
def animate_to_index(self, new_index, inc = 0):
letter_spread = 10
letter_width = 7
if inc < 0:
rng = range(inc * letter_spread, 1)
elif inc > 0:
rng = range(inc * letter_spread)[::-1]
else:
rng = [0]
#print rng
for x in rng:
frame = dmd.Frame(width=128, height=10)
for offset in range(-7, 8):
index = new_index - offset
#print "Index %d len=%d" % (index, len(self.letters))
if index < 0:
index = len(self.letters) + index
elif index >= len(self.letters):
index = index - len(self.letters)
(w, h) = self.font.size(self.letters[index])
#print "Drawing %d w=%d" % (index, w)
self.letters_font.draw(frame, self.letters[index], 128/2 - offset * letter_spread - letter_width/2 + x, 0,color=ep.CYAN)
frame.fill_rect(64-5, 0, 1, 10, 10)
frame.fill_rect(64+5, 0, 1, 10, 10)
self.lowerhalf_layer.frames += [frame]
self.current_letter_index = new_index
# Prune down the frames list so we don't get too far behind while animating
x = 0
while len(self.lowerhalf_layer.frames) > 15 and x < (len(self.lowerhalf_layer.frames)-1):
del self.lowerhalf_layer.frames[x]
x += 2
# Now draw the top right panel, with the selected initials in order:
self.inits_frame.clear()
init_spread = 8
x_offset = self.inits_frame.width/2 - len(self.inits) * init_spread / 2
for x in range(len(self.inits)):
self.init_font.draw(self.inits_frame, self.inits[x], x * init_spread + x_offset, 0,color=ep.GREEN)
self.inits_frame.fill_rect((len(self.inits)-1) * init_spread + x_offset, 9, 8, 1, 1)
def letter_increment(self, inc):
new_index = (self.current_letter_index + inc)
if new_index < 0:
new_index = len(self.letters) + new_index
elif new_index >= len(self.letters):
new_index = new_index - len(self.letters)
#print("letter_increment %d + %d = %d" % (self.current_letter_index, inc, new_index))
self.inits = self.inits[:-1] + self.letters[new_index]
self.animate_to_index(new_index, inc)
def letter_accept(self):
letter = self.letters[self.current_letter_index]
if letter == self.char_back:
if len(self.inits) > 0:
self.inits = self.inits[:-1]
elif letter == self.char_done or len(self.inits) > self.max_inits:
#print "Ending entry"
self.inits = self.inits[:-1] # Strip off the done character
if self.entered_handler != None:
self.entered_handler(mode=self, inits=self.inits)
# fire the knocker if we have some to fire
if self.knocks:
self.game.interrupter.knock(self.knocks)
else:
self.game.logger.warning('InitialEntryMode finished but no entered_handler to notify!')
else:
self.inits += letter
            # once we've entered the maximum number of initials, jump to the accept character
            if len(self.inits) == (self.max_inits + 1):
                self.current_letter_index = self.letters.index(self.char_done)
self.letter_increment(0)
def sw_flipperLwL_active(self, sw):
self.periodic_left()
return game.SwitchStop
def sw_flipperLwL_inactive(self, sw):
self.cancel_delayed('periodic_movement')
return game.SwitchStop
def sw_flipperLwR_active(self, sw):
self.periodic_right()
return game.SwitchStop
def sw_flipperLwR_inactive(self, sw):
self.cancel_delayed('periodic_movement')
return game.SwitchStop
def periodic_left(self):
self.letter_increment(-1)
self.delay(name='periodic_movement', event_type=None, delay=0.2, handler=self.periodic_left)
def periodic_right(self):
self.letter_increment(1)
self.delay(name='periodic_movement', event_type=None, delay=0.2, handler=self.periodic_right)
def sw_startButton_active(self, sw):
self.letter_accept()
return game.SwitchStop
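# Hypothetical usage sketch (the handler body, priority and text values are
# assumptions; as the class docstring notes, the handler must remove the mode):
#   def initials_done(mode, inits):
#       game.modes.remove(mode)
#       ...  # store `inits` with the corresponding high score entry
#   entry = InitialEntryMode(game, priority=90, left_text='Player 1',
#                            right_text='High Score #1',
#                            entered_handler=initials_done, max_inits=3)
#   game.modes.add(entry)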
|
1658125
|
from .scenario import Scenario
from .scenario import SetMapfileDirectory
from .scenario import GetMapfileDirectory
__all__ = [
"Scenario",
"SetMapfileDirectory",
"GetMapfileDirectory"
]
|
1658173
|
import numpy as np
from PIL import Image
import glob
import torch
import torch.nn as nn
from torch.autograd import Variable
from random import randint
from torch.utils.data.dataset import Dataset
from pre_processing import *
from mean_std import *
Training_MEAN = 0.4911
Training_STDEV = 0.1658
class SEMDataTrain(Dataset):
def __init__(self, image_path, mask_path, in_size=572, out_size=388):
"""
Args:
image_path (str): the path where the image is located
mask_path (str): the path where the mask is located
option (str): decide which dataset to import
"""
# all file names
self.mask_arr = glob.glob(str(mask_path) + "/*")
self.image_arr = glob.glob(str(image_path) + str("/*"))
self.in_size, self.out_size = in_size, out_size
# Calculate len
self.data_len = len(self.mask_arr)
# calculate mean and stdev
def __getitem__(self, index):
"""Get specific data corresponding to the index
Args:
index (int): index of the data
Returns:
Tensor: specific data on index which is converted to Tensor
"""
"""
# GET IMAGE
"""
single_image_name = self.image_arr[index]
img_as_img = Image.open(single_image_name)
# img_as_img.show()
img_as_np = np.asarray(img_as_img)
# Augmentation
# flip {0: vertical, 1: horizontal, 2: both, 3: none}
flip_num = randint(0, 3)
img_as_np = flip(img_as_np, flip_num)
        # Noise determine {0: Gaussian_noise, 1: uniform_noise}
if randint(0, 1):
# Gaussian_noise
gaus_sd, gaus_mean = randint(0, 20), 0
img_as_np = add_gaussian_noise(img_as_np, gaus_mean, gaus_sd)
else:
# uniform_noise
l_bound, u_bound = randint(-20, 0), randint(0, 20)
img_as_np = add_uniform_noise(img_as_np, l_bound, u_bound)
# Brightness
pix_add = randint(-20, 20)
img_as_np = change_brightness(img_as_np, pix_add)
# Elastic distort {0: distort, 1:no distort}
sigma = randint(6, 12)
# sigma = 4, alpha = 34
img_as_np, seed = add_elastic_transform(img_as_np, alpha=34, sigma=sigma, pad_size=20)
# Crop the image
img_height, img_width = img_as_np.shape[0], img_as_np.shape[1]
pad_size = int((self.in_size - self.out_size)/2)
img_as_np = np.pad(img_as_np, pad_size, mode="symmetric")
y_loc, x_loc = randint(0, img_height-self.out_size), randint(0, img_width-self.out_size)
img_as_np = cropping(img_as_np, crop_size=self.in_size, dim1=y_loc, dim2=x_loc)
'''
# Sanity Check for image
img1 = Image.fromarray(img_as_np)
img1.show()
'''
# Normalize the image
img_as_np = normalization2(img_as_np, max=1, min=0)
img_as_np = np.expand_dims(img_as_np, axis=0) # add additional dimension
img_as_tensor = torch.from_numpy(img_as_np).float() # Convert numpy array to tensor
"""
# GET MASK
"""
single_mask_name = self.mask_arr[index]
msk_as_img = Image.open(single_mask_name)
# msk_as_img.show()
msk_as_np = np.asarray(msk_as_img)
# flip the mask with respect to image
msk_as_np = flip(msk_as_np, flip_num)
# elastic_transform of mask with respect to image
# sigma = 4, alpha = 34, seed = from image transformation
msk_as_np, _ = add_elastic_transform(
msk_as_np, alpha=34, sigma=sigma, seed=seed, pad_size=20)
msk_as_np = approximate_image(msk_as_np) # images only with 0 and 255
# Crop the mask
msk_as_np = cropping(msk_as_np, crop_size=self.out_size, dim1=y_loc, dim2=x_loc)
'''
# Sanity Check for mask
img2 = Image.fromarray(msk_as_np)
img2.show()
'''
# Normalize mask to only 0 and 1
msk_as_np = msk_as_np/255
# msk_as_np = np.expand_dims(msk_as_np, axis=0) # add additional dimension
msk_as_tensor = torch.from_numpy(msk_as_np).long() # Convert numpy array to tensor
return (img_as_tensor, msk_as_tensor)
def __len__(self):
"""
Returns:
length (int): length of the data
"""
return self.data_len
class SEMDataVal(Dataset):
def __init__(self, image_path, mask_path, in_size=572, out_size=388):
'''
Args:
image_path = path where test images are located
mask_path = path where test masks are located
'''
# paths to all images and masks
self.mask_arr = glob.glob(str(mask_path) + str("/*"))
self.image_arr = glob.glob(str(image_path) + str("/*"))
self.in_size = in_size
self.out_size = out_size
self.data_len = len(self.mask_arr)
def __getitem__(self, index):
"""Get specific data corresponding to the index
Args:
            index (int): index of the image to load from the path
Returns:
Tensor: 4 cropped data on index which is converted to Tensor
"""
single_image = self.image_arr[index]
img_as_img = Image.open(single_image)
# img_as_img.show()
# Convert the image into numpy array
img_as_np = np.asarray(img_as_img)
# Make 4 cropped image (in numpy array form) using values calculated above
# Cropped images will also have paddings to fit the model.
pad_size = int((self.in_size - self.out_size)/2)
img_as_np = np.pad(img_as_np, pad_size, mode="symmetric")
img_as_np = multi_cropping(img_as_np,
crop_size=self.in_size,
crop_num1=2, crop_num2=2)
# Empty list that will be filled in with arrays converted to tensor
processed_list = []
for array in img_as_np:
# SANITY CHECK: SEE THE CROPPED & PADDED IMAGES
#array_image = Image.fromarray(array)
# Normalize the cropped arrays
img_to_add = normalization2(array, max=1, min=0)
# Convert normalized array into tensor
processed_list.append(img_to_add)
img_as_tensor = torch.Tensor(processed_list)
# return tensor of 4 cropped images
# top left, top right, bottom left, bottom right respectively.
"""
# GET MASK
"""
single_mask_name = self.mask_arr[index]
msk_as_img = Image.open(single_mask_name)
# msk_as_img.show()
msk_as_np = np.asarray(msk_as_img)
# Normalize mask to only 0 and 1
msk_as_np = multi_cropping(msk_as_np,
crop_size=self.out_size,
crop_num1=2, crop_num2=2)
msk_as_np = msk_as_np/255
# msk_as_np = np.expand_dims(msk_as_np, axis=0) # add additional dimension
msk_as_tensor = torch.from_numpy(msk_as_np).long() # Convert numpy array to tensor
original_msk = torch.from_numpy(np.asarray(msk_as_img))
return (img_as_tensor, msk_as_tensor, original_msk)
def __len__(self):
return self.data_len
class SEMDataTest(Dataset):
def __init__(self, image_path, in_size=572, out_size=388):
'''
Args:
image_path = path where test images are located
mask_path = path where test masks are located
'''
# paths to all images and masks
self.image_arr = glob.glob(str(image_path) + str("/*"))
self.in_size = in_size
self.out_size = out_size
self.data_len = len(self.image_arr)
def __getitem__(self, index):
'''Get specific data corresponding to the index
Args:
            index (int): index of the image to load from the path
Returns:
Tensor: 4 cropped data on index which is converted to Tensor
'''
single_image = self.image_arr[index]
img_as_img = Image.open(single_image)
# img_as_img.show()
# Convert the image into numpy array
img_as_np = np.asarray(img_as_img)
pad_size = int((self.in_size - self.out_size)/2)
img_as_np = np.pad(img_as_np, pad_size, mode="symmetric")
img_as_np = multi_cropping(img_as_np,
crop_size=self.in_size,
crop_num1=2, crop_num2=2)
# Empty list that will be filled in with arrays converted to tensor
processed_list = []
for array in img_as_np:
# SANITY CHECK: SEE THE PADDED AND CROPPED IMAGES
# array_image = Image.fromarray(array)
# Normalize the cropped arrays
img_to_add = normalization2(array, max=1, min=0)
# Convert normalized array into tensor
processed_list.append(img_to_add)
img_as_tensor = torch.Tensor(processed_list)
# return tensor of 4 cropped images
# top left, top right, bottom left, bottom right respectively.
return img_as_tensor
def __len__(self):
return self.data_len
if __name__ == "__main__":
SEM_train = SEMDataTrain(
'../data/train/images', '../data/train/masks')
SEM_test = SEMDataTest(
'../data/test/images/', '../data/test/masks')
SEM_val = SEMDataVal('../data/val/images', '../data/val/masks')
imag_1, msk = SEM_train.__getitem__(0)
|
1658174
|
import hdmi
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
from pkgutil import walk_packages
def _find_packages(path='.', prefix=''):
yield prefix
prefix += "."
for _, name, is_package in walk_packages(path,
prefix,
onerror=lambda x: x):
if is_package:
yield name
def find_packages():
return list(_find_packages(hdmi.__path__, hdmi.__name__))
setup(name='hdmi',
version=hdmi.__version__,
install_requires=['myhdl >= 1.0.dev0'],
description='Implementation of HDMI Source/Sink Modules in MyHDL',
url='https://github.com/srivatsan-ramesh/HDMI-Source-Sink-Modules',
author='srivatsan-ramesh',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
zip_safe=False)
|
1658204
|
import re
import sys
def copy_board(board, sets):
"""Return a copy of board setting new squares from 'sets' dictionary."""
return [[sets.get((r, c), board[r][c]) for c in range(9)] for r in range(9)]
def get_alternatives_for_square(board, nrow, ncolumn):
"""Return sequence of valid digits for square (nrow, ncolumn) in board."""
def _box(idx, size=3):
"""Return indexes to cover a box (3x3 sub-matrix of a board)."""
start = (idx // size) * size
return range(start, start + size)
nums_in_box = [board[r][c] for r in _box(nrow) for c in _box(ncolumn)]
nums_in_row = [board[nrow][c] for c in range(9)]
nums_in_column = [board[r][ncolumn] for r in range(9)]
nums = nums_in_box + nums_in_row + nums_in_column
return sorted(set(range(1, 9+1)) - set(nums))
def get_more_constrained_square(board):
"""Get the square in board with more constrains (less alternatives)."""
ranges = ((x, y) for x in range(9) for y in range(9))
constrains = [(len(get_alternatives_for_square(board, r, c)), (r, c))
for (r, c) in ranges if not board[r][c]]
if constrains:
return min(constrains)[1]
def solve(board):
"""Return a solved Sudoku board (None if no solution was found)."""
pos = get_more_constrained_square(board)
if not pos:
return board # all squares are filled, so this board is the solution
nrow, ncolumn = pos
for test_digit in get_alternatives_for_square(board, nrow, ncolumn):
test_board = copy_board(board, {(nrow, ncolumn): test_digit})
solved_board = solve(test_board)
if solved_board:
return solved_board
def lines2board(lines):
"""Parse a text board stripping spaces and setting 0's for empty squares."""
    spaces = re.compile(r"\s+")
return [[(int(c) if c in "123456789" else 0) for c in spaces.sub("", line)]
for line in lines if line.strip()]
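# Example of the text format lines2board accepts (whitespace is stripped and
# any non-digit character, such as '.', marks an empty square):
#   5 3 . . 7 . . . .
#   6 . . 1 9 5 . . .
#   ... (nine rows of nine entries in total)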
def main(args):
"""Solve a Sudoku board read from a text file."""
from pprint import pprint
path, = args
board = lines2board(open(path))
pprint(board)
pprint(solve(board))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
1658241
|
import random
import sys
from PIL import Image, ImageDraw, ImageFont
import numpy as np
FONT_PATH = "/mnt/c/Windows/Fonts/"
fonts = []
fonts.append("NIS_R10N.ttc")
fonts.append("NIS_R10N.ttc")
fonts.append("NIS_SAI8N.ttc")
num_images = 10000
#num_images = 100
images = np.empty((num_images, 28, 28), dtype=np.uint8)
KANAS = "あいうえおかきくけこ"
chars = [ord(c) for c in list(KANAS)]
for i in range(num_images):
img = Image.new('L', (28, 28))
draw = ImageDraw.Draw(img)
f = FONT_PATH+random.choice(fonts)
draw.font = ImageFont.truetype(f, 26)
theta = random.randint(-5, 5)
kana = chr(random.choice(chars))
x = random.randint(0, 2)
y = random.randint(0, 2)
draw.text((x, y), kana, (255))
img = img.rotate(theta)
nim = np.array(img)
nim = nim.reshape((28, 28))
    images[i] = nim
sys.stdout.write('\r>> Generating image %d/%d' % (i + 1, num_images))
sys.stdout.flush()
if i < 100:
filename = "test%04d.png" % i
img.save(filename)
np.save("hiragana", images)
print(images.shape)
|
1658259
|
import math
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler, SequentialSampler
from sklearn.model_selection import StratifiedKFold
import dgl
def collate(samples):
# 'samples (graph, label)'
graphs, labels = map(list, zip(*samples))
for g in graphs:
for key in g.node_attr_schemes().keys():
g.ndata[key] = g.ndata[key].float()
batched_graph = dgl.batch(graphs)
labels = torch.tensor(labels)
return batched_graph, labels
class GraphDataLoader():
def __init__(self, dataset, batch_size, device,
collate_fn=collate, seed=0, shuffle=True,
split_name='fold10', fold_idx=0, split_ratio=0.7):
self.shuffle = shuffle
self.seed = seed
self.kwargs = {'pin_memory': True} if device >= 0 else {}
labels = [l for _, l in dataset]
if split_name == 'fold10':
train_idx, valid_idx = self._split_fold10(
labels, fold_idx, seed, shuffle
)
elif split_name == 'rand':
train_idx, valid_idx = self._split_rand(
labels, split_ratio, seed, shuffle
)
else:
raise NotImplementedError()
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
self.train_loader = DataLoader(
dataset, sampler=train_sampler,
batch_size=batch_size, collate_fn=collate_fn, **self.kwargs
)
self.valid_loader = DataLoader(
dataset, sampler=valid_sampler,
batch_size=batch_size, collate_fn=collate_fn, **self.kwargs
)
def train_valid_loader(self):
return self.train_loader, self.valid_loader
def _split_fold10(self, labels, fold_idx=0, seed=0, shuffle=True):
        assert 0 <= fold_idx < 10, 'fold_idx must be from 0 to 9.'
skf = StratifiedKFold(n_splits=10, shuffle=shuffle, random_state=seed)
idx_list = []
for idx in skf.split(np.zeros(len(labels)), labels):
idx_list.append(idx)
train_idx, valid_idx = idx_list[fold_idx]
print(
'train_set: test_set = %d : %d' % (len(train_idx), len(valid_idx))
)
return train_idx, valid_idx
def _split_rand(self, labels, split_ratio=0.7, seed=0, shuffle=True):
num_entries = len(labels)
indices = list(range(num_entries))
np.random.seed(seed)
np.random.shuffle(indices)
split = int(math.floor(split_ratio * num_entries))
train_idx, valid_idx = indices[:split], indices[split:]
print(
'train_set: test_set = %d : %d' % (len(train_idx), len(valid_idx))
)
return train_idx, valid_idx
if __name__ == '__main__':
from Temp.dataset import GINDataset
dataset = GINDataset(name='PROTEINS', self_loop=True, degree_as_nlabel=False)
Loader_list = []
for idx in range(10):
train_loader, valid_loader = GraphDataLoader(
dataset, batch_size=128, device=0, collate_fn=collate,
seed=9, shuffle=True, split_name='fold10', fold_idx=idx
).train_valid_loader()
Loader_list.append((train_loader, valid_loader))
print(Loader_list)
|
1658271
|
import rlp
from quarkchain.utils import sha3_256
class FakeHeader():
""" A Fake Minor Block Header
TODO: Move non-root-chain
"""
def __init__(self, hash=b'\x00' * 32, number=0, timestamp=0, difficulty=1,
gas_limit=3141592, gas_used=0, uncles_hash=sha3_256(rlp.encode([]))):
self.hash = hash
self.number = number
self.timestamp = timestamp
self.difficulty = difficulty
self.gas_limit = gas_limit
self.gas_used = gas_used
self.uncles_hash = uncles_hash
def get_hash(self):
return self.hash
|
1658274
|
import os
import pytest
from rdkit import Chem
from pysmilesutils.augment import SMILESAugmenter, MolAugmenter
class TestRandomizer:
@pytest.fixture
def get_test_smiles(self):
try:
current_dir = os.path.dirname(__file__)
with open(os.path.join(current_dir, "./test_smiles.smi")) as file:
smiles_data = file.readlines()
test_smiles = [smi[:-1] for smi in smiles_data]
        except FileNotFoundError:
            pytest.fail("Cannot find 'test_smiles.smi'")
return test_smiles
def get_num_new_random_smiles(self, test_smiles, smiles):
num_new = 0
for smi, smi_rand in zip(test_smiles, smiles):
if smi != smi_rand:
num_new += 1
return num_new
def test_smiles_majority_random_unrestricted(self, get_test_smiles):
"""Checks the `SMLIESRandomizer` by testing that when `restricted` is
`False` mostly (99%) of the SMILES randomized are distinct from the
canonical.
"""
smiles_randomizer_unrestricted = SMILESAugmenter(restricted=False)
randomized_smiles = smiles_randomizer_unrestricted(get_test_smiles)
num_new = self.get_num_new_random_smiles(get_test_smiles, randomized_smiles)
assert num_new / len(get_test_smiles) > 0.99
def test_smiles_majority_random_restricted(self, get_test_smiles):
"""Checks the `SMLIESRandomizer` by testing that when `restricted` is
`True` mostly (99%) of the SMILES randomized are distinct from the
canonical.
"""
smiles_randomizer_restricted = SMILESAugmenter(restricted=True)
randomized_smiles = smiles_randomizer_restricted(get_test_smiles)
num_new = self.get_num_new_random_smiles(get_test_smiles, randomized_smiles)
assert num_new / len(get_test_smiles) > 0.99
def test_mol_majority_random(self, get_test_smiles):
"""Checks the `MolRandomizer` by testing that mostly (99%) of the Mols
randomized are distinct from the original canonical.
"""
mol_randomizer = MolAugmenter()
mols = [Chem.MolFromSmiles(smi) for smi in get_test_smiles]
mols_randomized = mol_randomizer(mols)
randomized_smiles = [
Chem.MolToSmiles(mol, canonical=False) for mol in mols_randomized
]
num_new = self.get_num_new_random_smiles(get_test_smiles, randomized_smiles)
assert num_new / len(mols) > 0.99
def test_mol_equality_random(self, get_test_smiles):
"""Check molecular equivalence after randomization by canonicalizing"""
smiles_randomizer_unrestricted = SMILESAugmenter(restricted=False)
randomized_smiles = smiles_randomizer_unrestricted(get_test_smiles)
assert all(
[
Chem.MolToSmiles(Chem.MolFromSmiles(mol1)) == Chem.MolToSmiles(Chem.MolFromSmiles(mol2))
for mol1, mol2 in zip(randomized_smiles, get_test_smiles)
]
)
def test_mol_equality_restricted(self, get_test_smiles):
"""Check molecular equivalence after randomization by canonicalizing"""
        smiles_randomizer_restricted = SMILESAugmenter(restricted=True)
        randomized_smiles = smiles_randomizer_restricted(get_test_smiles)
assert all(
[
Chem.MolToSmiles(Chem.MolFromSmiles(mol1)) == Chem.MolToSmiles(Chem.MolFromSmiles(mol2))
for mol1, mol2 in zip(randomized_smiles, get_test_smiles)
]
)
def test_active(self, get_test_smiles):
"""Tests that the `active` property works, i.e, that when the augmenter is
not active it just returns the object that is input.
"""
randomizer = SMILESAugmenter()
smiles_rand = randomizer(get_test_smiles)
assert smiles_rand != get_test_smiles
randomizer.active = False
smiles_nonrand = randomizer(get_test_smiles)
assert smiles_nonrand == get_test_smiles
|
1658300
|
import sys
if sys.version_info < (3, 0):
import testcase
else:
from . import testcase
#
# This test function tests elements of the heading levels
#
class TestLevelChars(testcase.TestCase):
title = "Heading Level Characters"
def test_level_chars(self):
# Check for "#" as level char
self.set_text(self._test_text1())
self.set_settings({'level_char': '#'})
self.run_plugin()
self.find('* Heading 1')
# Check for "-" as level char
self.set_text(self._test_text2())
self.set_settings({'level_char': '-'})
self.run_plugin()
self.find('* Heading 1')
# Check for ":" as level char
self.set_text(self._test_text3())
self.set_settings({'level_char': ':'})
self.run_plugin()
self.find('* Heading 1')
#
# Initial text used in above tests
#
def _test_text1(self):
return """
/*
* TOC
*/
// # Heading 1
// ## Heading 2
"""
def _test_text2(self):
return """
/*
* TOC
*/
// - Heading 1
// -- Heading 2
"""
def _test_text3(self):
return """
/*
* TOC
*/
// : Heading 1
// :: Heading 2
"""
|
1658303
|
from abc import ABC
from typing import Optional, Sequence, Union
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.experiment_utils import Builder
class PointNavBaseConfig(ExperimentConfig, ABC):
"""An Object Navigation experiment configuration in iThor."""
ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
PREPROCESSORS: Sequence[Union[Preprocessor, Builder[Preprocessor]]] = tuple()
SENSORS: Optional[Sequence[Sensor]] = None
STEP_SIZE = 0.25
ROTATION_DEGREES = 30.0
DISTANCE_TO_GOAL = 0.2
STOCHASTIC = True
CAMERA_WIDTH = 400
CAMERA_HEIGHT = 300
SCREEN_SIZE = 224
MAX_STEPS = 500
def __init__(self):
self.REWARD_CONFIG = {
"step_penalty": -0.01,
"goal_success_reward": 10.0,
"failed_stop_reward": 0.0,
"reached_max_steps_reward": 0.0,
"shaping_weight": 1.0,
}
|
1658327
|
from time import clock
from DaPy.core import LogInfo, Series
from .base import BaseEngineModel
class PageRank(BaseEngineModel):
def __init__(self, engine='numpy', random_walk_rate=0.85):
BaseEngineModel.__init__(self, engine)
self.random_walk_rate = random_walk_rate
@property
def random_walk_rate(self):
return self._alpha
@random_walk_rate.setter
def random_walk_rate(self, rate):
assert isinstance(rate, float)
assert 0 <= rate <= 1
self._alpha = rate
def __setstate__(self, args):
BaseEngineModel.__setstate__(self, args)
self._alpha = args['_alpha']
def __call__(self, X_mat, stochastic_matrix=None, min_error=0.0001, max_iter=1000):
        return self.transform(stochastic_matrix, X_mat, min_error, max_iter)
def transform(self, stochastic_matrix, init_weight=None, min_error=0.001, max_iter=1000):
if init_weight is None:
init_weight = Series([1.0 / len(stochastic_matrix)] * len(stochastic_matrix))
init_weight = self._mat(init_weight).T
        if stochastic_matrix is False:
            # reuse the stochastic matrix stored by a previous call
            weight = self._weight
        else:
            self._weight = weight = self._mat(stochastic_matrix)
assert isinstance(max_iter, int) and max_iter >= 1
assert init_weight.shape[1] == 1, '`init_weight` should be 1-D sequence'
assert init_weight.shape[0] == weight.shape[1], 'items in init_weight not fit the shape of weight matrix'
for round_ in range(max_iter):
X_next = self._alpha * self._dot(weight, init_weight) + (1.0 - self._alpha) / init_weight.shape[0]
error = self._sum(self._abs(X_next - init_weight))
init_weight = X_next
if error < min_error:
## LogInfo(' Early stopped iteration')
break
return Series(init_weight.T.tolist()[0])
if __name__ == '__main__':
weight = [
[0, 0.9, 0, 0],
[0.333, 0, 0, 0.5],
[0.333, 0, 1, 0.5],
[0.333, 0.5, 0, 0]
]
initial = [0.25, 1, 0.25, 0.25]
pageranker = PageRank("numpy")
print(pageranker(initial, weight))
|
1658367
|
from cherry.envs.atari import AtariEnvironment
from cherry.envs.doom import DoomEnvironment
from cherry.envs.classic_control import ClassicControlEnvironment
from cherry.envs.pybullet_robotics import PyBulletRoboticsEnvironment
from utils.helpers import get_logger
logger = get_logger(__name__)
ENVS = {'atari': AtariEnvironment,
'doom': DoomEnvironment,
'classic_control': ClassicControlEnvironment,
'pybullet-robotics': PyBulletRoboticsEnvironment}
def build_env(cfgs):
try:
env = ENVS.get(cfgs['type'])
return env(cfgs)
except Exception as err:
logger.error('Error setting up env {}, {}'.format(cfgs['type'], err))
|
1658391
|
import socket
class SimpleConfig(object):
"Simple configuration file parser"
def __init__(self, filename):
self.filename = filename
self.load()
def load(self):
items = {}
with open(self.filename) as fh:
for line in fh:
# Clean up line, remove comments
line = line.strip()
if "#" in line:
line = line[:line.index("#")].strip()
# Get the values
if line:
try:
variable, value = line.split("=", 1)
except ValueError:
raise ValueError("Bad config line (no = and not a comment): %s" % line)
items.setdefault(variable.strip().lower(), set()).add(value.strip())
# Save to ourselves
self.items = items
def __getitem__(self, item):
values = self.items[item]
if len(values) > 1:
raise ValueError("More than one value specified for %s" % item)
return list(values)[0]
def get(self, item, default=None):
values = self.items.get(item, set())
if len(values) == 0:
return default
if len(values) > 1:
raise ValueError("More than one value specified for %s" % item)
return list(values)[0]
def get_int(self, item, default):
return int(self.get(item, default))
def get_all(self, item):
return self.items.get(item, set())
def get_all_addresses(self, item, default=None):
addresses = set()
for value in self.get_all(item):
try:
address, port = value.rsplit(":", 1)
family = socket.AF_INET
except ValueError:
raise ValueError("Invalid address (no port found): %s" % value)
if address[0] == "[":
address = address.strip("[]")
family = socket.AF_INET6
if address == "*":
address = "::"
family = socket.AF_INET6
addresses.add(((address, int(port)), family))
if not addresses:
addresses = default or set()
return addresses
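# Illustrative usage sketch (the file name and its contents are assumptions):
# given a config file such as
#   port = 8080            # inline comments after '#' are stripped
#   listen = 127.0.0.1:8080
#   listen = [::1]:9000
#   listen = *:8443
# the parser behaves roughly as follows:
#   cfg = SimpleConfig("example.conf")
#   cfg.get_int("port", 80)         -> 8080
#   cfg.get_all_addresses("listen") -> {(("127.0.0.1", 8080), socket.AF_INET),
#                                       (("::1", 9000), socket.AF_INET6),
#                                       (("::", 8443), socket.AF_INET6)}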
|
1658430
|
from __future__ import division
import logging
from time import time
from math import ceil, sqrt
from collections import defaultdict
from feemodel.util import StoppableThread, DataSample
from feemodel.simul import Simul
from feemodel.simul.stats import WaitFn
from feemodel.simul.transient import transientsim
from feemodel.app.predict import WAIT_PERCENTILE_PTS, TxPrediction
from feemodel.config import EXPECTED_BLOCK_INTERVAL, MINRELAYTXFEE
default_update_period = 60.
default_miniters = 2000
default_maxiters = 10000
logger = logging.getLogger(__name__)
class TransientOnline(StoppableThread):
def __init__(self, mempool, poolsonline, txonline,
update_period=default_update_period,
miniters=default_miniters,
maxiters=default_maxiters,
numprocesses=None):
self.mempool = mempool
self.txonline = txonline
self.poolsonline = poolsonline
self.update_period = update_period
self.miniters = miniters
self.maxiters = maxiters
self.numprocesses = numprocesses
self.stats = None
super(TransientOnline, self).__init__()
@StoppableThread.auto_restart(60)
def run(self):
logger.info("Starting transient online sim.")
while not self.is_stopped():
try:
self.update()
except StopIteration:
pass
self.sleep_till_next()
logger.info("Stopped transient online sim.")
# Ensures that Prediction.update_predictions doesn't get outdated
# values, if this thread has bugged out
self.stats = None
def sleep_till_next(self):
'''Sleep till the next update.'''
stats = self.stats
if stats is not None:
time_till_next = max(
stats.timestamp + self.update_period - time(), 0)
self.sleep(time_till_next)
def update(self):
pools, tx_source, mempoolstate = self._get_resources()
sim = Simul(pools, tx_source)
feepoints = self.calc_feepoints(sim, mempoolstate)
init_entries = remove_lowfee(mempoolstate.entries, sim.stablefeerate)
stats = TransientStats()
feepoints, waittimes = transientsim(
sim,
feepoints=feepoints,
init_entries=init_entries,
miniters=self.miniters,
maxiters=self.maxiters,
maxtime=self.update_period,
numprocesses=self.numprocesses,
stopflag=self.get_stop_object())
stats.record_waittimes(feepoints, waittimes)
logger.debug("Finished transient sim in %.2fs and %d iterations" %
(stats.timespent, stats.numiters))
# Warn if we reached miniters
if stats.timespent > 1.1*self.update_period:
logger.warning("Transient sim took %.2fs to do %d iters." %
(stats.timespent, stats.numiters))
self.stats = stats
def _get_resources(self):
"""Get transient sim resources.
Get the SimPools, SimTxSource, and MempoolState objects. If any are
not ready, retry every 5 seconds.
"""
while not self.is_stopped():
pools = self.poolsonline.get_pools()
tx_source = self.txonline.get_txsource()
mempoolstate = self.mempool.state
if mempoolstate and pools and tx_source:
return pools, tx_source, mempoolstate
# Resources aren't available due to some error elsewhere,
# so get rid of stats to avoid giving stale stats to others.
self.stats = None
self.sleep(5)
raise StopIteration
def calc_feepoints(self, sim, mempoolstate,
max_wait_delta=60, min_num_pts=20):
"""Get feepoints at which to evaluate wait times.
The feepoints are chosen so that the wait times are approximately
evenly spaced, 1 min apart. This is done by linear interpolation
of previous wait times.
        If no stats have been computed yet, return None (i.e. use the
default feepoints computed by transientsim)
"""
mempool_sizefn = mempoolstate.get_sizefn()
maxcap = sim.cap.capfn[-1][1]
minfeepoint = None
txratepts = list(sim.cap.txbyteratefn)
txratepts.append((MINRELAYTXFEE, sim.cap.txbyteratefn(MINRELAYTXFEE)))
txratepts.sort()
for feerate, txbyterate in txratepts:
if feerate < sim.stablefeerate:
continue
capdelta = maxcap - txbyterate
assert capdelta > 0
mempoolsize = mempool_sizefn(feerate)
if mempoolsize / capdelta < 10800:
# Roughly 3 hours to clear
minfeepoint = feerate
break
if minfeepoint is None:
minfeepoint = feerate
# No need to process transactions with fee rate lower than minfeepoint
sim.stablefeerate = max(sim.stablefeerate, minfeepoint)
if not self.stats:
# Use default feepoints - even spacing
return None
waitfn = self.stats.expectedwaits
minwait = waitfn._y[-1]
maxwait = waitfn._y[0]
wait_delta = min(max_wait_delta,
(maxwait - minwait) / (min_num_pts - 1))
wait_delta = max(wait_delta, 1)
num_pts = 1 + int(round((maxwait - minwait) / wait_delta))
wait_pts = [minwait + wait_delta*i for i in range(num_pts)]
feepoints = [int(round(waitfn.inv(wait))) for wait in wait_pts]
maxfeepoint = sim.cap.inv_util(0.05)
# maxfeepoint must also be at least the 0.95 cap feerate
for feerate, cap in sim.cap.capfn:
if cap >= 0.95*maxcap:
alt_maxfeepoint = feerate
break
# maxfeepoint must also be at least so that mempoolsize is "small"
alt_maxfeepoint2 = int(mempool_sizefn.inv(
0.1*maxcap*EXPECTED_BLOCK_INTERVAL, use_upper=True))
maxfeepoint = max(maxfeepoint, alt_maxfeepoint, alt_maxfeepoint2)
minfeepoint = sim.stablefeerate
feepoints.extend([minfeepoint, maxfeepoint])
feepoints = filter(
lambda feerate: minfeepoint <= feerate <= maxfeepoint,
sorted(set(feepoints)))
return feepoints
def get_stats(self):
stats = {
'params': {
'miniters': self.miniters,
'maxiters': self.maxiters,
'update_period': self.update_period
}
}
tstats = self.stats
if tstats is not None:
stats.update(tstats.get_stats())
return stats
class TransientStats(object):
def __init__(self):
self.timestamp = time()
def record_waittimes(self, feepoints, waittimes):
self.timespent = time() - self.timestamp
self.numiters = len(waittimes[0])
expectedwaits = []
expectedwaits_err = []
waitpercentiles = []
for waitsample in waittimes:
waitdata = DataSample(waitsample)
waitdata.calc_stats()
expectedwaits.append(waitdata.mean)
expectedwaits_err.append(waitdata.std / sqrt(self.numiters))
waitpercentiles.append(
[waitdata.get_percentile(p) for p in WAIT_PERCENTILE_PTS])
self.feepoints = feepoints
self.expectedwaits = WaitFn(feepoints, expectedwaits,
expectedwaits_err)
self.waitmatrix = [WaitFn(feepoints, w) for w in zip(*waitpercentiles)]
def predict(self, feerate, currtime):
        '''Predict the wait time of a transaction with the specified feerate.
        Returns a TxPrediction object, or None if feerate is below the lowest
        feepoint.
        '''
if feerate < self.feepoints[0]:
return None
waitpercentiles = [w(feerate) for w in self.waitmatrix]
return TxPrediction(waitpercentiles, feerate, currtime)
def estimatefee(self, waitminutes):
feerate = self.expectedwaits.inv(waitminutes*60)
if feerate is not None:
feerate = int(ceil(feerate))
return feerate
def decidefee(self, txsize, ten_minute_cost, waitcostfn="quadratic"):
"""Compute the optimal transaction fee.
The cost of a transaction is modeled as:
C = txfee + f(waittime)
where f, the wait cost function, is non-decreasing and f(0) = 0.
This method thus computes the optimal fee (not feerate) in satoshis,
with respect to expected cost, for a transaction of size <txsize>
and a given wait cost function f. We restrict f by the following
two parameters:
1. <ten_minute_cost>: the cost in satoshis of a wait time of 10 min.
2. <waitcostfn> in ('linear', 'quadratic'): specify whether f is
linear or quadratic in the wait time.
In the future perhaps this method could be generalized to accept
arbitrary wait cost functions.
"""
if waitcostfn == "linear":
waitcosts = [meanwait / 600 * ten_minute_cost
for meanwait in self.expectedwaits.waits]
elif waitcostfn == "quadratic":
# mean squared wait = var(wait) + mean(wait)^2
meansq_waits = [
self.numiters*stderr**2 + meanwait**2
for stderr, meanwait in
zip(self.expectedwaits.errors, self.expectedwaits.waits)]
waitcosts = [meansq_wait / 360000 * ten_minute_cost
for meansq_wait in meansq_waits]
else:
raise ValueError("waitcostfn keyword arg must be "
"'linear' or 'quadratic'.")
C_array = [feerate*txsize/1000 + waitcost
for feerate, waitcost in zip(self.feepoints, waitcosts)]
bestidx = min(enumerate(C_array), key=lambda c: c[1])[0]
C = C_array[bestidx]
best_feerate = self.feepoints[bestidx]
best_fee = int(ceil(best_feerate * txsize / 1000))
expectedwait = self.expectedwaits.waits[bestidx]
return best_fee, expectedwait, C
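    # Worked example of the cost model above (numbers are purely illustrative):
    # with feepoints = [1000, 4000] sat/kB, expected waits = [1800 s, 600 s],
    # txsize = 250 bytes and ten_minute_cost = 500 satoshis (linear wait cost):
    #   C(1000) = 1000*250/1000 + (1800/600)*500 = 250 + 1500 = 1750
    #   C(4000) = 4000*250/1000 + ( 600/600)*500 = 1000 +  500 = 1500
    # so decidefee would pick the 4000 sat/kB point and return a fee of
    # ceil(4000*250/1000) = 1000 satoshis.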
def get_stats(self):
stats = {
'timestamp': self.timestamp,
'timespent': self.timespent,
'numiters': self.numiters,
'feepoints': self.feepoints,
'expectedwaits': self.expectedwaits.waits,
'expectedwaits_stderr': self.expectedwaits.errors,
'waitmatrix': [w.waits for w in self.waitmatrix],
}
return stats
def remove_lowfee(entries, feethresh):
"""Remove all low fee (< feethresh) transactions and their dependants.
"""
# Build a dependency map
depmap = defaultdict(list)
for txid, entry in entries.items():
for dep in entry.depends:
depmap[dep].append(txid)
removed = set()
for txid, entry in entries.items():
if entry.feerate < feethresh:
removelist = [txid]
while removelist:
txid_remove = removelist.pop()
if txid_remove in removed:
continue
removed.add(txid_remove)
removelist.extend(depmap[txid_remove])
return {txid: entry for txid, entry in entries.items()
if txid not in removed}
|
1658441
|
import sys
sys.path.insert(0, '../utils')
import ioManager
import new
sys.path.insert(0, '../connectors')
import transport
sys.path.insert(0,'../sequential')
import ff
inputS = transport.wires(1)
inputR = transport.wires(1)
out = transport.wires(2)
clock = transport.wires(1)
hware = ff.SRFlipFlop(inputS,inputR,out,clock)
iohandler = ioManager.StringIO(hware)
print iohandler.input('0','1','1')
|
1658455
|
personGroupId = 'test0' #test0
key = '17122c4be3214178ab93127fad066013'
BASE_URL = 'https://centralindia.api.cognitive.microsoft.com/face/v1.0/'
#BASE_URL = 'https://attendancemgmt.cognitiveservices.azure.com/face/v1.0/'
#key = 'b5005855a40e4331ba25d65902a8d4d1'
|
1658482
|
from pycocotools.coco import COCO
from tqdm import tqdm
import os
import json
import threading
from mmdet.apis import init_detector, inference_detector, show_result_pyplot
from mmdet.datasets.pipelines import Compose
import numpy as np
import matplotlib.pyplot as plt
import cv2
class CutConfig(object):
# process module
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='CutROI', training=False),
dict(type='CutImage', training=False, window=(1000, 1000), step=(500, 500), order_index=False,
is_keep_none=True)
]
compose = Compose(train_pipeline)
# data module
img_dir = "/home/lifeng/undone-work/DefectNet/tools/data/tile/raw/tile_round1_testA_20201231/testA_imgs"
test_file = "/home/lifeng/undone-work/dataset/detection/tile/annotations/instance_testA.json"
save_file = "/home/lifeng/undone-work/DetCompetition/mmdet-v2/work_dirs/tile/baseline_cut_1000x1000/do_submit_testA.json"
original_coco = COCO(test_file)
label2name = {x['id']: x['name'] for x in original_coco.dataset['categories']}
main_thread_lock = threading.Lock()
save_results = []
num_workers = 7
process_cnt = 0
# inference module
device = 'cuda:0'
config_file = '/home/lifeng/undone-work/DefectNet/configs/tile/baseline_model_2000x2000.py'
checkpoint_file = '/data/liphone/detcomp/mmdet-v2/tile/baseline_cut_1000x1000/epoch_12.pth'
model = init_detector(config_file, checkpoint_file, device=device)
def do_work(images, config):
for image in tqdm(images):
image['filename'] = image['file_name']
results = {
'img_prefix': config.img_dir,
'img_info': image}
results = config.compose(results)
if results is None: results = []
for i, result in enumerate(results):
bbox_result = inference_detector(config.model, result['img'])
# img = np.array(result['img'])
for label, predicts in enumerate(bbox_result):
for r in predicts:
# b = list(map(int, r[:4]))
# cv2.rectangle(img, tuple(b[:2]), tuple(b[2:]), (255, 0, 0), 3)
bbox = list(map(float, r[:4]))
if 'top_left' in result:
bbox = [bbox[0] + result['top_left'][0], bbox[1] + result['top_left'][1],
bbox[2] + result['top_left'][0], bbox[3] + result['top_left'][1]]
if 'roi_top_left' in result:
bbox = [bbox[0] + result['roi_top_left'][0], bbox[1] + result['roi_top_left'][1],
bbox[2] + result['roi_top_left'][0], bbox[3] + result['roi_top_left'][1]]
category_id, score = config.label2name[label + 1], r[4]
pred = {'name': str(image['filename']), 'category': int(category_id),
'bbox': bbox,
'score': float(score)}
config.save_results.append(pred)
# plt.imshow(img)
# plt.show()
# cv2.imwrite("a.jpg", img)
config.process_cnt += 1
if config.process_cnt % 1 == 0 or config.process_cnt == len(images):
print("process {}/{}...".format(config.process_cnt, len(images)))
# for rst in config.save_results:
# img = cv2.imread(os.path.join(config.img_dir, rst['name']))
# b = list(map(int, rst['bbox'][:4]))
# cv2.rectangle(img, tuple(b[:2]), tuple(b[2:]), (255, 0, 0), 3)
# #plt.imshow(img)
# #plt.show()
# cv2.imwrite("a.jpg", img)
return True
def main():
config = CutConfig()
if not os.path.exists(os.path.dirname(config.save_file)):
os.makedirs(os.path.dirname(config.save_file))
dataset = config.original_coco.dataset
dataset['images'] = dataset['images']
per_work_size = len(dataset['images']) // max(config.num_workers, 1)
fetch, cnt = [], 0
threads = []
for i in range(config.num_workers):
start = i * per_work_size
end = start + per_work_size
if (i + 1) == config.num_workers:
end = len(dataset['images'])
images = dataset['images'][start:end]
cnt += len(images)
threads.append(threading.Thread(target=do_work, args=(images, config)))
assert cnt == len(dataset['images'])
for t in threads:
t.start()
for t in threads:
t.join()
with open(config.save_file, "w") as fp:
json.dump(config.save_results, fp, indent=4, ensure_ascii=False)
print("process ok!")
if __name__ == '__main__':
main()
|
1658537
|
import importlib as il
import importlib.resources as ir
import pathlib as pl
import pkgutil as pu
import sys
import chemex.containers.conditions as ccc
import chemex.experiments.configs as cec
import chemex.helper as ch
def read(filename, model, selection=None, defaults=None):
if selection is None:
selection = {"include": None, "exclude": None}
if defaults is None:
defaults = {}
config = ch.read_toml(filename)
config["filename"] = pl.Path(filename)
config["model"] = model
config["selection"] = selection
config["defaults"] = defaults
config["conditions"] = ccc.parse_conditions(config)
module = grab(get_experiment_name(config))
return module.read(config)
def get_experiment_name(config):
filename = config["filename"]
if "experiment" not in config:
sys.exit(
f"\nerror: The experiment file '{filename}' has no section '[experiment]'."
)
elif "name" not in config["experiment"]:
sys.exit(
f"\nerror: The experiment file '{filename}' has no entry 'name' in the "
f"section '[experiment]'."
)
return config["experiment"]["name"]
def grab(name):
try:
module = il.import_module(f"{__package__}.{name}")
except ModuleNotFoundError:
sys.exit(
f"\nerror: '{name}' is not part of our experiment collection! "
f"Run 'chemex info' to obtain the full list of the available experiments."
)
else:
return module
def get_info():
docs = {}
for module in pu.iter_modules(__path__, __name__ + "."):
if module.ispkg or "helper" in module.name:
continue
imported_module = il.import_module(module.name)
exp_name = module.name.replace(__name__ + ".", "")
docs[exp_name] = imported_module.__doc__
return docs
def get_config():
return {
name.replace(".toml", ""): ir.read_text(cec, name)
for name in ir.contents(cec)
if name.endswith(".toml")
}
|
1658588
|
from libsaas import http, parsers
from libsaas.services import base
from . import resource, media
class LocationBase(resource.ReadonlyResource):
path = 'locations'
class Locations(LocationBase):
path = 'locations/search'
@base.apimethod
def get(self, lat=None, distance=None, lng=None,
foursquare_v2_id=None, foursquare_id=None):
"""
fetch all locations by geographic coordinate.
:var lat: Latitude of the center search coordinate.
If used, lng is required.
:vartype lat: float
:var distance: Default is 1km (distance=1000), max distance is 5km.
:vartype distance: int
:var lng: Longitude of the center search coordinate.
If used, lat is required.
:vartype lng: float
:var foursquare_v2_id: A foursquare v2 api location id.
            If used, you are not required to use lat and lng.
:vartype foursquare_v2_id: str
:var foursquare_id: A foursquare v1 api location id.
If used, you are not required to use lat and lng.
Note that this method is deprecated; you should use the
new foursquare IDs with V2 of their API.
:vartype foursquare_id: str
"""
params = base.get_params(
('lat', 'distance', 'lng', 'foursquare_v2_id', 'foursquare_id'),
locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class Location(LocationBase):
@base.resource(media.RecentMedia)
def recent_media(self):
"""
Return the resource corresponding to all recent media
for the location.
"""
return media.RecentMedia(self)
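# Illustrative note: `Locations.get` above only builds a GET request against
# 'locations/search' with whichever documented query parameters are supplied
# (for example lat/lng/distance for a coordinate search, or a foursquare id);
# executing the request is assumed to be handled by the surrounding libsaas
# service layer.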
|
1658615
|
from torch import nn
'''
MobileNetV2 blocks from https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py
Copyright (c) <NAME> 2016,
All rights reserved.
'''
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
padding = (kernel_size - 1) // 2
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, kernel_size, stride, expand_ratio, use_res_connect):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.kernel_size = kernel_size
self.use_res_connect = use_res_connect
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, kernel_size=self.kernel_size),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
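# Minimal shape-check sketch (the channel, stride and input sizes below are
# illustrative): a stride-1 block with matching in/out channels can use the
# residual add, so the output shape equals the input shape.
if __name__ == "__main__":
    import torch
    block = InvertedResidual(inp=32, oup=32, kernel_size=3, stride=1,
                             expand_ratio=6, use_res_connect=True)
    x = torch.randn(1, 32, 56, 56)
    assert block(x).shape == x.shape  # stride 1 preserves the spatial size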
|
1658643
|
from PyQt5.QtCore import Qt, QAbstractTableModel, QSortFilterProxyModel, QDateTime
from PyQt5.QtWidgets import QWidget, QLabel, QMessageBox, QStyledItemDelegate, QStyleOptionViewItem, QStyledItemDelegate
from PyQt5.QtGui import QIcon, QColor
from forms.ui_paymentsPage import Ui_PaymentsPage
from utils import timeout_bool
from transactionDescDialog import TransactionDescDialog
import datetime
import operator
class PaymentsPage(QWidget, Ui_PaymentsPage):
"""The page to display LN payments"""
def __init__(self, plugin):
super().__init__()
self.paymentsData = []
self.setupUi(self)
self.plugin = plugin
self.paymentsTableheaders = '', 'Date', 'Type', 'Label', 'Payment Hash', 'Amount (mBTC)',''
self.paymentsModel = TableModel(self.paymentsTableheaders)
self.proxyModel = CustomSortingModel()
self.proxyModel.setSourceModel(self.paymentsModel)
self.populatePaymentsData()
self.paymentsModel.setData(self.paymentsData)
self.paymentsTableView.setModel(self.proxyModel)
self.paymentsTableView.sortByColumn(1, Qt.DescendingOrder)
self.setView()
self.initUi()
def initUi(self):
"""Initialize the UI by connecting actions"""
self.paymentsTableView.doubleClicked.connect(self.showDetails)
def setView(self):
"""Set the Table sizes"""
self.paymentsTableView.setColumnWidth(0, 30)
self.paymentsTableView.setColumnWidth(1, 140)
self.paymentsTableView.setColumnWidth(2, 100)
self.paymentsTableView.setColumnWidth(3, 500)
self.paymentsTableView.setColumnWidth(4, 300)
self.paymentsTableView.selectRow(0)
self.paymentsTableView.setColumnHidden(6,True)
self.paymentsTableView.setColumnHidden(4,True)
def populatePayments(self):
"""Update payments list"""
self.paymentsModel.layoutAboutToBeChanged.emit()
self.populatePaymentsData()
self.paymentsModel.setData(self.paymentsData)
self.paymentsModel.layoutChanged.emit()
def populatePaymentsData(self):
"""Update payments data"""
self.paymentsData = []
"""Update pays history list"""
pays = self.plugin.rpc.listpays()
# Condition to prevent RPC errors
if pays:
for pay in pays["pays"]:
decodedPay = self.plugin.rpc.decodepay(pay["bolt11"])
if "label" in pay:
self.paymentsData.append([pay["status"],datetime.datetime.fromtimestamp(decodedPay["created_at"]),"Pay",pay["label"],pay["payment_hash"],decodedPay["msatoshi"],pay["bolt11"]])
else:
self.paymentsData.append([pay["status"],datetime.datetime.fromtimestamp(decodedPay["created_at"]),"Pay","-",pay["payment_hash"],decodedPay["msatoshi"],pay["bolt11"]])
invoices = self.plugin.rpc.listinvoices()
# Condition to prevent RPC errors
if invoices:
for invoice in invoices["invoices"]:
decodedPay = self.plugin.rpc.decodepay(invoice["bolt11"])
if "label" in invoice:
self.paymentsData.append([invoice["status"],datetime.datetime.fromtimestamp(decodedPay["created_at"]),"Invoice",invoice["label"],invoice["payment_hash"],decodedPay["msatoshi"],invoice["bolt11"]])
else:
self.paymentsData.append([invoice["status"],datetime.datetime.fromtimestamp(decodedPay["created_at"]),"Invoice","-",invoice["payment_hash"],decodedPay["msatoshi"],invoice["bolt11"]])
def showDetails(self):
index = self.paymentsTableView.currentIndex()
value=index.sibling(index.row(),6).data()
decodedPay = self.plugin.rpc.decodepay(value)
dialog = TransactionDescDialog(decodedPay)
dialog.exec_()
class CustomSortingModel(QSortFilterProxyModel):
def lessThan(self,left,right):
col = left.column()
dataleft = left.data()
dataright = right.data()
if col == 1:
dataleft = QDateTime.fromString(dataleft, "dd/MM/yyyy hh:mm")
dataright = QDateTime.fromString(dataright, "dd/MM/yyyy hh:mm")
return dataleft < dataright
class TableModel(QAbstractTableModel):
def __init__(self, headerin):
super(TableModel, self).__init__()
self._arraydata = None
self._headerdata = headerin
def data(self, index, role):
if role == Qt.DisplayRole and index.column() != 0:
if index.column() == 3 and self._arraydata[index.row()][3] == '-':
value = '(' + self._arraydata[index.row()][4] + ')'
elif index.column() == 5 and self._arraydata[index.row()][2] == 'Pay':
value = self._arraydata[index.row()][5] * -1
else:
value = self._arraydata[index.row()][index.column()]
if isinstance(value, datetime.datetime):
return value.strftime('%d/%m/%Y' ' %H:%M')
return value
if role == Qt.DecorationRole:
if index.column() == 0:
value = self._arraydata[index.row()][index.column()]
if value == "complete" or value == "paid":
return QIcon(":/icons/success")
elif value == "failed":
return QIcon(":/icons/failed")
elif value == "expired":
return QIcon(":/icons/expired")
elif value == "unpaid":
return QIcon(":/icons/pending")
if index.column() == 3:
value = self._arraydata[index.row()][index.column()-1]
if value == "Pay":
return QIcon(":/icons/txoutput")
elif value == "Invoice":
return QIcon(":/icons/txinput")
if role == Qt.BackgroundRole and index.row() % 2 == 1:
return QColor(247,247,247)
if role == Qt.ForegroundRole:
value = self._arraydata[index.row()][0]
if value == "expired" or value == "failed" or value == "unpaid":
return QColor(140,140,140)
if index.column() == 5 and self._arraydata[index.row()][index.column()-3] == 'Pay':
return QColor(255,0,0)
if index.column() == 5 and self._arraydata[index.row()][index.column()-3] == 'Invoice' and self._arraydata[index.row()][0] == 'complete':
return QColor(0,255,0)
if role == Qt.TextAlignmentRole and index.column() == 5:
return Qt.AlignRight | Qt.AlignVCenter
if role == Qt.ToolTipRole:
value = self._arraydata[index.row()][0] + '\n ' + self._arraydata[index.row()][1].strftime('%d/%m/%Y' ' %H:%M') + '\n' + self._arraydata[index.row()][2] + '\n' + self._arraydata[index.row()][3] + '\n' + self._arraydata[index.row()][4] + '\n' + str(self._arraydata[index.row()][5])
return value
def setData(self, datain):
self._arraydata = datain
def rowCount(self, index):
# The length of the outer list.
return len(self._arraydata)
def columnCount(self, index):
return len(self._headerdata)
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
if section == 5 :
return Qt.AlignRight | Qt.AlignVCenter
else :
return Qt.AlignLeft | Qt.AlignVCenter
if role == Qt.DisplayRole :
if orientation == Qt.Horizontal :
return self._headerdata[section]
elif orientation == Qt.Vertical :
return self._headerdata[section]
return None
|
1658660
|
class EventEmitter(object):
def __init__(self):
self._on_handlers = {}
self._once_handlers = {}
def on(self, event, handler):
if event not in self._on_handlers:
self._on_handlers[event] = []
self._on_handlers[event].append(handler)
def once(self, event, handler):
if event not in self._once_handlers:
self._once_handlers[event] = []
self._once_handlers[event].append(handler)
def emit(self, event, *args, **kwargs):
handlers = self._once_handlers.pop(event, [])
handlers += self._on_handlers.get(event, [])
for handler in handlers:
handler(*args, **kwargs)
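# Minimal usage sketch: `on` handlers fire on every emit, `once` handlers fire
# only for the next emit of that event and are then discarded.
if __name__ == "__main__":
    emitter = EventEmitter()
    emitter.on("data", lambda payload: print("on:", payload))
    emitter.once("data", lambda payload: print("once:", payload))
    emitter.emit("data", 1)  # both handlers run
    emitter.emit("data", 2)  # only the 'on' handler runs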
|
1658713
|
from django import template
from django.conf import settings
from django.core.cache import cache
from links.models import Link
register = template.Library()
@register.inclusion_tag('links/_object_links.html')
def object_links(obj):
if hasattr(obj, 'cached_links'):
obj_links = obj.cached_links
else:
obj_links = Link.objects.for_model(obj)
return {'links': obj_links, 'MEDIA_URL': settings.MEDIA_URL}
@register.inclusion_tag('links/_object_icon_links.html')
def object_icon_links(obj):
"Display links as icons, to match the new design"
key = "%s.%s.%s" % (obj._meta.app_label, obj._meta.module_name, obj.pk)
obj_links = cache.get(key, None) # look in the cache first
if obj_links is None: # if not found in cache
if hasattr(obj, 'cached_links'):
obj_links = obj.cached_links
else:
obj_links = Link.objects.for_model(obj) # get it from db
cache.set(key, obj_links, settings.LONG_CACHE_TIME) # and save to cache
return {'links': obj_links}
|
1658763
|
from django_tgbot.exceptions import ProcessFailure
from django_tgbot.state_manager import state_types
from django_tgbot.state_manager.transition_condition import TransitionCondition
import inspect
from django_tgbot.state_manager.state_manager import StateManager
def processor(manager: StateManager, from_states=None, message_types=None, update_types=None,
exclude_message_types=None, exclude_update_types=None, success=None, fail=None):
def state_registrar(func):
if func is None:
raise ValueError("Passed processor is None.")
all_args = inspect.getfullargspec(func)
if not all([
x in all_args[0] for x in ['bot', 'update', 'state']
]):
raise ValueError("Passed processor does not have a valid signature.")
def function_runner(bot, update, state, *args, **kwargs):
current_state = state.name
try:
func(bot=bot, update=update, state=state, *args, **kwargs)
if success == state_types.Reset:
state.name = ''
state.save()
elif success == state_types.Keep:
state.name = current_state
state.save()
elif success is not None:
state.name = success
state.save()
except ProcessFailure:
if fail == state_types.Reset:
state.name = ''
state.save()
elif fail == state_types.Keep:
state.name = current_state
state.save()
elif fail is not None:
state.name = fail
state.save()
altered_message_types = message_types
altered_update_types = update_types
if altered_message_types is None:
altered_message_types = manager.default_message_types
if altered_update_types is None:
altered_update_types = manager.default_update_types
manager.register_state(TransitionCondition(
from_states=from_states,
message_types=altered_message_types,
exclude_message_types=exclude_message_types,
update_types=altered_update_types,
exclude_update_types=exclude_update_types,
), processor=function_runner)
return function_runner
return state_registrar
|
1658769
|
def insert_suffix(text, keyword, inserted_text):
"""
在关键字后面插入文本 (从头算起的第1个关键字)
:param text:
:param keyword:
:param inserted_text:
:return: str: 插入后的结果
"""
position = text.find(keyword)
if position != -1:
new_text = text[:position + len(keyword)] + inserted_text + text[position + len(keyword):]
return new_text
else:
raise RuntimeError('keyword not in text')
def insert_prefix(text, keyword, inserted_text):
"""
在关键字前面插入文本 (从头算起的第1个关键字)
:param text:
:param keyword:
:param inserted_text:
:return: str: 插入后的结果
"""
position = text.find(keyword)
if position != -1:
new_text = text[:position] + inserted_text + text[position:]
return new_text
else:
raise RuntimeError('keyword not in text')
def del_next_line(text, keyword):
"""
删除下一行
:param text:
:param keyword:
:return: str: 删除行之后的结果
"""
position = text.find(keyword)
if position != -1:
text_pre = text[:position + len(keyword)]
text_rear = text[position + len(keyword):]
text_rear = text_rear[text_rear.find('\n') + 1:]
return text_pre + text_rear[text_rear.find('\n'):]
else:
raise RuntimeError('keyword not in text')
def del_pre_line(text, keyword):
"""
删除上一行
:param text:
:param keyword:
:return: str: 删除行之后的结果
"""
position = text.find(keyword)
if position != -1:
pos_line_head = text.rfind('\n', 0, position)
pos_pre_line_head = text.rfind('\n', 0, pos_line_head)
return text[0:pos_pre_line_head] + text[pos_line_head:]
else:
raise RuntimeError('keyword not in text')
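# A short usage sketch for the helpers above; the sample text is made up.
if __name__ == '__main__':
    sample = 'alpha\nbeta\ngamma\n'
    print(insert_suffix(sample, 'beta', '-X'))  # 'alpha\nbeta-X\ngamma\n'
    print(insert_prefix(sample, 'beta', 'X-'))  # 'alpha\nX-beta\ngamma\n'
    print(del_next_line(sample, 'alpha'))       # drops the 'beta' line
    print(del_pre_line(sample, 'gamma'))        # drops the 'beta' line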
|
1658794
|
import gpflow
import meshzoo
import numpy as np
import pytest
import tensorflow as tf
from geometric_kernels.backends.tensorflow import GPflowGeometricKernel
from geometric_kernels.kernels import MaternKarhunenLoeveKernel
from geometric_kernels.spaces import Mesh
class DefaultFloatZero(gpflow.mean_functions.Constant):
"""
    Simple zero mean function that uses gpflow's default_float
    as dtype instead of the input's dtype. Using the input's dtype here
    would cause a dtype mismatch because the inputs are integer indices.
"""
def __init__(self, output_dim=1):
super().__init__()
self.output_dim = output_dim
del self.c
def __call__(self, inputs):
output_shape = tf.concat([tf.shape(inputs)[:-1], [self.output_dim]], axis=0)
return tf.zeros(output_shape, dtype=gpflow.default_float())
# filename = Path(__file__).parent / "../teddy.obj"
# mesh = Mesh.load_mesh(str(filename))
# return mesh
# TODO(VD) This needs fixing!
@pytest.mark.skip()
def test_gpflow_integration():
"""
Build GPflow GPR model with a Mesh Geometric Kernel.
"""
resolution = 5
vertices, faces = meshzoo.icosa_sphere(resolution)
mesh = Mesh(vertices, faces)
nu = 1 / 2.0
truncation_level = 20
base_kernel = MaternKarhunenLoeveKernel(mesh, nu, truncation_level)
kernel = GPflowGeometricKernel(base_kernel)
num_data = 25
def get_data():
# np.random.seed(1)
_X = np.random.randint(mesh.num_vertices, size=(num_data, 1))
_K = kernel.K(_X).numpy()
_y = np.linalg.cholesky(_K + np.eye(num_data) * 1e-6) @ np.random.randn(
num_data, 1
)
return _X, _y
X, y = get_data()
model = gpflow.models.GPR(
(X, y), kernel, mean_function=DefaultFloatZero(), noise_variance=1.1e-6
)
print(model.log_marginal_likelihood())
X_test = np.arange(mesh.num_vertices).reshape(-1, 1)
# print(X_test)
m, v = model.predict_f(X_test)
m, v = m.numpy(), v.numpy()
model.predict_f_samples(X_test).numpy()
# print(sample.shape)
# ps.init()
# ps_cloud = ps.register_point_cloud("my points", vertices[X.flatten()])
# ps_cloud.add_scalar_quantity("data", y.flatten())
# my_mesh = ps.register_surface_mesh("my mesh", vertices, faces, smooth_shade=True)
# my_mesh.add_scalar_quantity(f"sample", sample.squeeze(), enabled=True)
# my_mesh.add_scalar_quantity(f"mean", m.squeeze(), enabled=True)
# my_mesh.add_scalar_quantity(f"variance", v.squeeze(), enabled=True)
# ps.show()
|
1658813
|
from typing import List
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
return self.kSum(sorted(nums), target, 4)
def kSum(self, nums: List[int], target: int, k: int) -> List[List[int]]:
if not nums or nums[0] * k > target or target > nums[-1] * k: return []
if k == 2: return self.twoSum(nums, target)
res = []
for i in range(len(nums)):
if i > 0 and nums[i] == nums[i - 1]: continue
for sub_sum in self.kSum(nums[i + 1:], target - nums[i], k - 1):
res.append([nums[i]] + sub_sum)
return res
def twoSum(self, nums: List[int], target: int) -> List[List[int]]:
res = []
s = set()
for i in range(len(nums)):
if (not res or res[-1][1] != nums[i]) and target - nums[i] in s:
res.append([target - nums[i], nums[i]])
s.add(nums[i])
return res
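# A quick sanity check of the k-sum recursion above, using the classic
# LeetCode sample input.
if __name__ == '__main__':
    print(Solution().fourSum([1, 0, -1, 0, -2, 2], 0))
    # expected: [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]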
|
1658814
|
from Helper.Utilities import *
from .BaseModel import *
from .XBill import XBill
from Classifier.Classifier import Classifier
from Config.const import BillStatus
class WeChatBill(BillModel):
id = AutoField(primary_key=True, column_name='id')
trans_time = DateTimeField(column_name='trans_time')
trans_type = CharField(30, column_name='trans_type')
trader_name = CharField(60, column_name='trader_name')
product_name = CharField(150, column_name='product_name')
fund_status = FixedCharField(15, column_name='fund_status')
amount = DecimalField(max_digits=10, decimal_places=2, column_name='amount')
pay_type = CharField(60, column_name='pay_type')
trans_status = FixedCharField(30, column_name='trans_status')
trans_id = CharField(50, column_name='trans_id', unique=True)
trader_id = CharField(50, column_name='trader_id')
remarks = TextField(column_name='remarks', null=True)
account = '微信'
titles = ['交易时间', '交易类型', '交易对方', '商品', '收/支', '金额(元)', '支付方式', '当前状态', '交易单号', '商户单号', '备注']
class Meta:
db_table = 'wechat'
def __str__(self):
return str(self.__data__)
def to_xbill(self) -> XBill:
def unify_status(fund_status):
if '支出' in fund_status:
status = BillStatus.PAYOUT
elif '收入' in fund_status:
status = BillStatus.INCOME
else:
status = BillStatus.INTERNAL_TRANS
return status
def format_remarks(*args) -> str:
s = [v for v in args if v != '/']
return ';'.join(s)
xbill = XBill()
xbill.account = self.account
xbill.amount = self.amount
xbill.currency = '人民币'
xbill.trans_time = self.trans_time
xbill.status = unify_status(self.fund_status)
xbill.trader_name = self.trader_name
xbill.product_name = self.product_name
xbill.remarks = format_remarks(self.pay_type, self.trans_type, self.trans_status, self.remarks)
xbill.associate_id = -1
xbill.status, xbill.category, xbill.subcategory = Classifier().classify(xbill)
return xbill
def is_exist(self) -> bool:
        query = self.__class__.get_or_none(trans_id=self.trans_id)
        return query is not None
@classmethod
def create_from_row(cls, row) -> "WeChatBill":
def to_float(s: str) -> float:
s = remove_comma(s)
s = s.replace('¥', '')
return float(s)
bill = WeChatBill()
bill.trans_time = str_to_datetime(row[0].strip())
bill.trans_type = row[1].strip()
bill.trader_name = row[2].strip()
bill.product_name = row[3].strip()
bill.fund_status = row[4].strip()
bill.amount = to_float(row[5].strip())
bill.pay_type = row[6].strip()
bill.trans_status = row[7].strip()
bill.trans_id = row[8].strip()
bill.trader_id = row[9].strip()
bill.remarks = row[10].strip()
return bill
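# A hypothetical input row matching `titles` above (all values are made up),
# left commented out because it needs the project's database and classifier.
# row = ['2021-01-02 12:30:00', '商户消费', '某商户', '某商品', '支出', '¥12.50',
#        '零钱', '支付成功', '4200000000000000000000000000', 'M0001', '/']
# bill = WeChatBill.create_from_row(row)
# if not bill.is_exist():
#     bill.save()
#     xbill = bill.to_xbill()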
|
1658858
|
import torch
import torch.nn as nn
import copy
class GramMatrix(nn.Module):
def forward(self, input):
_, channels, h, w = input.size()
out = input.view(-1, h * w)
out = torch.mm(out, out.t())
return out.div(channels * h * w)
class StyleLoss(nn.Module):
def __init__(self, target, weight):
super().__init__()
self.target = target.detach() * weight
self.weight = weight
self.criterion = nn.MSELoss()
self.gm = GramMatrix()
def forward(self, input):
gm = self.gm(input.clone())
loss = self.criterion(gm * self.weight, self.target)
return loss
def check_layers(layers):
"""
relu1_* - 2, relu2_* - 2, relu3_* - 4, relu4_* - 4, relu5_* - 4
"""
in_layers = []
for layer in layers:
layer = layer[-3:]
if layer[0] == '1' or layer[0] == '2':
in_layers += [2 * (int(layer[0]) - 1) + int(layer[2]) - 1]
else:
in_layers += [4 * (int(layer[0]) - 3) + int(layer[2]) + 3]
return in_layers
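# Worked examples for the mapping above: blocks 1-2 contribute ReLU indices
# 0-3 and blocks 3-5 contribute indices 4-15, so for instance
# check_layers(['relu1_1', 'relu2_1', 'relu3_2', 'relu5_4']) == [0, 2, 5, 15].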
class Vgg_Model(nn.Module):
def __init__(self, vgg):
super().__init__()
self.layers = copy.deepcopy(vgg)
def forward(self, input, out_layers):
relu_outs, out = [], input
out_layers = check_layers(out_layers)
for layer in self.layers:
out = layer(out)
if isinstance(layer, nn.ReLU):
relu_outs.append(out)
outs = [relu_outs[index - 1] for index in out_layers]
return outs
if __name__ == '__main__':
from torch.autograd import Variable
from torchvision.models import vgg19
from img_loader import IMG_Processer
STYLE_LAYERS = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')
CONTENT_LAYERS = ('relu4_2',)
vgg = vgg19(True).features
vm = Vgg_Model(vgg)
vm = vm.cuda()
ip = IMG_Processer()
_style, _content = ip.img2tensor('night.jpg', 'Tuebingen_Neckarfront.jpg')
_style = Variable(_content.unsqueeze(0))
_style = _style.cuda()
# print(vm(_style, STYLE_LAYERS))
print(vm(_style, STYLE_LAYERS + CONTENT_LAYERS))
|
1658863
|
import argparse
from datetime import datetime as dt
import pathlib
from .. import fem_data as fd
def main():
parser = argparse.ArgumentParser(
description="Convert FEM file format.")
parser.add_argument(
'input_type',
type=str,
help='Input file type')
parser.add_argument(
'output_type',
type=str,
help='Output file type')
parser.add_argument(
'input_path',
type=pathlib.Path,
help='Input file path')
parser.add_argument(
'-o', '--output-directory',
type=pathlib.Path,
default=None,
help='Output directory path')
args = parser.parse_args()
if args.input_path.is_dir():
input_directory = args.input_path
fem_data = fd.FEMData.read_directory(
args.input_type, input_directory, read_npy=False)
elif args.input_path.is_file():
input_directory = args.input_path.parent
fem_data = fd.FEMData.read_files(
args.input_type, [args.input_path])
else:
raise ValueError(
f"{args.input_path} is neither directory nor file.")
if args.output_directory is None:
args.output_directory = input_directory
date_string = dt.now().isoformat().replace('T', '_').replace(':', '-')
fem_data.write(
args.output_type, args.output_directory / ('out_' + date_string),
overwrite=False)
if __name__ == '__main__':
main()
|
1658888
|
import unittest
from contextlib import redirect_stdout
from ctypes import cdll
import random
import os
import sys
import multiprocessing
from multiprocessing import Pipe, Value
import logging
from typing import Tuple, Any
from batchkit.utils import tee_to_pipe_decorator, NonDaemonicPool, FailedRecognitionError
"""
In this test module, there are primarily three things under test:
1 -- tee_to_pipe_decorator
2 -- NonDaemonicPool
3 -- Our intended usage pattern of using them together when the pool worker proc
uses a subproc and gets back return value and exception.
"""
logger = logging.getLogger("test_pool_tee")
# logger.level = logging.DEBUG
logger.level = logging.INFO
log_stream_handler = logging.StreamHandler(sys.stdout)
# Toggle this to get useful debug trace.
# logger.addHandler(log_stream_handler)
test_root = os.path.dirname(os.path.realpath(__file__))
libblah_path = os.path.join(test_root, 'resources/libsegv.so')
lock = multiprocessing.Lock()
count_terms = Value('i', 0, lock=True)
count_exceptions = Value('i', 0, lock=True)
count_returns = Value('i', 0, lock=True)
# Emulates the work item on pool, run by the pool worker proc.
# Delegates the dangerous stuff to a subproc. We test the full
# pattern using tee_to_pipe_decorator() even though we may not
# have all of sig term, exception, and return in one app.
def parent_entry(id: int):
global count_terms, count_exceptions, count_returns
parent_conn, child_conn = Pipe()
work_proc = multiprocessing.Process(
target=tee_to_pipe_decorator(work_entry, child_conn),
args=(id,))
work_proc.start()
_, status = os.waitpid(work_proc.pid, 0)
if os.WIFSIGNALED(status):
signum = os.WTERMSIG(status)
assert signum == 11
assert not parent_conn.poll()
logger.debug("TERM")
with count_terms.get_lock():
count_terms.value += 1
else:
assert os.WIFEXITED(status)
# We either have a return value or an exception
assert parent_conn.poll()
obj = parent_conn.recv()
if isinstance(obj, Exception):
logger.debug("EXCEPTION")
with count_exceptions.get_lock():
count_exceptions.value += 1
# Making sure it's actually raisable else this pool proc dies
# and we deadlock outside.
try:
raise obj
except Exception as e:
logger.debug("CAUGHT MYSELF: {0}".format(e))
else:
# This would fail if obj were not the successful return type.
assert obj[0] == 123
assert obj[1] == 456
logger.debug("RETURN")
with count_returns.get_lock():
count_returns.value += 1
parent_conn.close()
child_conn.close()
logger.debug("Parent {0} is returning".format(id))
return None
def work_entry(somearg: int) -> Tuple[int, int]:
if random.choice(["succeed", "segv"]) == "succeed":
if random.choice(["succeed", "throw"]) == "succeed":
return 123, 456
else:
raise FailedRecognitionError("a failed recognition")
else:
lib = cdll.LoadLibrary(libblah_path)
lib.foo()
return 123, 456
count_pool_success = 0
count_pool_errors = 0
def on_finish(anything: Any):
global count_pool_success
lock.acquire()
count_pool_success += 1
lock.release()
def on_error(anything: Any):
global count_pool_errors
lock.acquire()
count_pool_errors += 1
lock.release()
class TestPoolWithTee(unittest.TestCase):
global count_pool_success, count_pool_errors
global count_terms, count_exceptions, count_returns
def test_NonDaemonicPool_with_tee_to_pipe(self):
# Disable because we know it works and interfering due to parallel tests (need to prevent that).
# return
pool_procs = 4
num_tasks = 100
p = NonDaemonicPool(pool_procs)
for i in range(num_tasks):
p.apply_async(parent_entry, [i], callback=on_finish, error_callback=on_error)
p.close()
p.join()
logger.debug("Final count_pool_success: {0}".format(count_pool_success))
logger.debug("Final count_pool_errors: {0}".format(count_pool_errors))
assert count_pool_success == num_tasks
assert count_pool_errors == 0
# Ensure we saw at least one of each.
logger.debug("Final count_exceptions: {0}".format(count_exceptions.value))
logger.debug("Final count_returns: {0}".format(count_returns.value))
logger.debug("Final count_terms: {0}".format(count_terms.value))
assert count_exceptions.value > 0
assert count_returns.value > 0
assert count_terms.value > 0
assert count_exceptions.value + count_returns.value + count_terms.value == num_tasks
if __name__ == '__main__':
multiprocessing.set_start_method('fork')
unittest.main()
|
1658925
|
from re import I, sub
import zmq
import rx
import click
import logging
import coloredlogs
import asyncio
import random
import functools
import json
from asyncio import AbstractEventLoop
from bson import json_util
from rx import operators as ops
from rx import Observable
from rx.subject import Subject
from rx.scheduler import ThreadPoolScheduler, CatchScheduler, CurrentThreadScheduler
from rx.scheduler.periodicscheduler import PeriodicScheduler
from rx.scheduler.eventloop import AsyncIOThreadSafeScheduler
from rx.core.typing import Observer, Scheduler, OnNext, OnError, OnCompleted
from rx.disposable import Disposable
# from zmq.sugar.context import Context
from zmq.sugar.socket import Socket
from zmq.asyncio import Context, Poller
from typing import Optional, Union, Dict
from trader.common.helpers import from_aiter
class MessagingPublisher(Observer):
def __init__(self,
publish_ip_address: str = '127.0.0.1',
publish_port: int = 5001,
loop: AbstractEventLoop = None):
super(Observer, self).__init__()
self.publish_ip_address = publish_ip_address
self.publish_port = publish_port
self.loop = loop
self.publish_context = Context()
self.publish_socket = self.publish_context.socket(zmq.PUB) # type: ignore
self.publish_socket.bind('tcp://{}:{}'.format(self.publish_ip_address, self.publish_port))
def on_next(self, message: Dict):
        if not isinstance(message, dict):
raise ValueError('message should be of type dict')
json_message = json.dumps(message, default=json_util.default)
self.publish_socket.send_string(json_message)
def on_completed(self):
logging.info('MessagingPublisher completed')
def on_error(self, error):
logging.error(error)
class MessagingSubscriber(Observable):
def __init__(self,
subscribe_ip_address: str = '127.0.0.1',
subscribe_port: int = 5002,
loop: AbstractEventLoop = None):
super(Observable, self).__init__()
self.subscribe_ip_address = subscribe_ip_address
self.subscribe_port = subscribe_port
self.loop = loop
self.subscribe_context = Context()
self.subscribe_socket = self.subscribe_context.socket(zmq.SUB) # type: ignore
self.subscribe_socket.connect('tcp://{}:{}'.format(self.subscribe_ip_address, self.subscribe_port))
self.subscribe_socket.setsockopt_string(zmq.SUBSCRIBE, '') # type: ignore
self.finished = False
self.disposable: rx.core.typing.Disposable
async def listen_publisher(self):
while not self.finished:
json_message = await self.subscribe_socket.recv_string()
message = json.loads(json_message, object_hook=json_util.object_hook)
yield message
def subscribe(self,
observer: Optional[Union[Observer, OnNext]] = None,
on_error: Optional[OnError] = None,
on_completed: Optional[OnCompleted] = None,
on_next: Optional[OnNext] = None,
*,
scheduler: Optional[Scheduler] = None) -> rx.core.typing.Disposable:
disposable = from_aiter(self.listen_publisher(), self.loop)
if observer:
self.disposable = disposable.subscribe(observer=observer, scheduler=scheduler)
else:
self.disposable = disposable.subscribe(on_next=on_next,
on_error=on_error,
on_completed=on_completed,
scheduler=scheduler)
return self.disposable
def dispose(self):
self.disposable.dispose()
def test(loop):
async def main(loop):
done = asyncio.Future()
subscriber = MessagingSubscriber(loop=loop)
subscriber.subscribe(on_next=lambda message: print(message))
await done
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
|
1658938
|
import json
from primehub import Helpful, Module, cmd, PrimeHubConfig
from tests import BaseTestCase
class ConfigWatcher(Helpful, Module):
@cmd(name='watch', description='watch-primehub-config')
def watch(self):
# ensure the config file is changed
assert self.primehub.primehub_config.config_file != self.primehub.primehub_config.get_default_path()
from tempfile import mkstemp
fd, path = mkstemp('.json', text=True)
self.primehub.primehub_config.save(path=path)
return dict(config_file=path)
def help_description(self):
return "Big Brother Watch YOU"
class TestCliConfig(BaseTestCase):
def setUp(self) -> None:
super(TestCliConfig, self).setUp()
# clean commands, add the FakeCommand
self.sdk.register_command('test_cli_config', ConfigWatcher)
def test_config_options(self):
"""
Global Options:
--config CONFIG the path of the config file
--endpoint ENDPOINT the endpoint to the PrimeHub GraphQL URL
--token TOKEN API Token generated from PrimeHub Console
--group GROUP override the active group
"""
group_info = {'name': 'test-config-from-cli:group'}
self.mock_request.return_value = {'data': {'me': {'effectiveGroups': [group_info]}}}
c = self.make_cfg()
# Verify the saved config will have test-config-from-cli:*
self.assert_config_with_prefix('test-config-from-cli', c)
# Verify other options
group_info = {'name': 'opt:group'}
self.mock_request.return_value = {'data': {'me': {'effectiveGroups': [group_info]}}}
c = self.make_cfg(['--endpoint', 'opt:endpoint', '--token', 'opt:api-token', '--group', 'opt:group'])
self.assert_config_with_prefix('opt', c)
def make_cfg(self, extra_args: list = None):
if extra_args is None:
extra_args = []
cfg_path = self.create_fake_config(self.cfg_dict_with_prefix('test-config-from-cli'))
output = self.cli_stdout(['app.py', 'test_cli_config', 'watch', '--config', cfg_path] + extra_args)
        # bug: the output buffer contains the previous result, so we only take the last one
output = output.strip().split("\n")[-1]
saved_cfg_path = json.loads(output)['config_file']
new_cfg = PrimeHubConfig(config=saved_cfg_path)
return new_cfg
|
1658945
|
from aiohttp import web
from aiohttp_security import authorized_userid, permits
async def test_authorized_userid(loop, aiohttp_client):
async def check(request):
userid = await authorized_userid(request)
assert userid is None
return web.Response()
app = web.Application()
app.router.add_route('GET', '/', check)
client = await aiohttp_client(app)
resp = await client.get('/')
assert 200 == resp.status
async def test_permits(loop, aiohttp_client):
async def check(request):
ret = await permits(request, 'read')
assert ret
ret = await permits(request, 'write')
assert ret
ret = await permits(request, 'unknown')
assert ret
return web.Response()
app = web.Application()
app.router.add_route('GET', '/', check)
client = await aiohttp_client(app)
resp = await client.get('/')
assert 200 == resp.status
|
1658950
|
from collections import deque
from hashlib import sha256
BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
BASE58_ALPHABET_LIST = list(BASE58_ALPHABET)
def b58encode(bytestr):
"""encode to base58"""
alphabet = BASE58_ALPHABET_LIST
encoded = deque()
append = encoded.appendleft
_divmod = divmod
num = int.from_bytes(bytestr, 'big')
while num > 0:
num, rem = _divmod(num, 58)
append(alphabet[rem])
encoded = ''.join(encoded)
pad = 0
for byte in bytestr:
if byte == 0:
pad += 1
else:
break
return '1' * pad + encoded
def b58decode(bytestr, length):
"""decode from base58"""
n = 0
for char in bytestr:
n = n * 58 + BASE58_ALPHABET.index(char)
return n.to_bytes(length, 'big')
def b58check(bytestr):
"""check if base58 encoded address is valid"""
try:
bcbytes = b58decode(bytestr, 25)
return bcbytes[-4:] == sha256(sha256(bcbytes[:-4]).digest()).digest()[:4]
except Exception:
return False
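# A small round-trip sketch for the helpers above (the payload is arbitrary).
if __name__ == '__main__':
    payload = b'\x00hello'
    encoded = b58encode(payload)  # the leading zero byte becomes a leading '1'
    assert b58decode(encoded, len(payload)) == payload
    print(encoded)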
|
1658969
|
from . import res_company
from . import base_account
from . import account_move
from . import eletronic_document
from . import nfe_models
from . import nfe
from . import fiscal_position
from . import res_config_settings
from . import res_partner
|
1658980
|
from django.contrib.auth import get_user_model
from django.core.cache import cache as default_cache
from rest_framework.test import APITestCase
from durin.models import AuthToken, Client
User = get_user_model()
class CustomTestCase(APITestCase):
def setUp(self):
# cleanup
default_cache.clear()
AuthToken.objects.all().delete()
Client.objects.all().delete()
# setup
self.authclient = Client.objects.create(name="authclientfortest")
username = "john.doe"
email = "<EMAIL>"
password = "<PASSWORD>"
self.user = User.objects.create_user(username, email, password)
self.creds = {
"username": username,
"password": password,
"client": self.authclient.name,
}
username2 = "jane.doe"
email2 = "<EMAIL>"
password2 = "<PASSWORD>"
self.user2 = User.objects.create_user(username2, email2, password2)
self.creds2 = {
"username": username2,
"password": <PASSWORD>,
"client": self.authclient.name,
}
self.client_names = ["web", "mobile", "cli"]
def _create_clients(self) -> None:
Client.objects.all().delete()
self.assertEqual(Client.objects.count(), 0)
for name in self.client_names:
Client.objects.create(name=name)
self.assertEqual(Client.objects.count(), len(self.client_names))
def _create_authtoken(self, user=None, client_name=None) -> AuthToken:
if not user:
user = self.user
if not client_name:
client_name = "customtestcase_client"
client, _ = Client.objects.get_or_create(name=client_name)
try:
token = AuthToken.objects.get(user=user, client=client)
except AuthToken.DoesNotExist:
token = AuthToken.objects.create(user=user, client=client)
return token
|
1659021
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
items = UnwrapElement(IN[0])
typelist = list()
for item in items:
try:
typelist.append(doc.GetElement(item.GetTypeId()))
except:
typelist.append(list())
OUT = typelist
|
1659128
|
import os, gc, shutil, uuid
from datetime import datetime, timezone
import json, asyncio
from __app__.TrainingModule import logHandler
from functools import wraps
from __app__.TrainingModule.ResourceFilterHelper import findProductId
from __app__.TrainingModule.ModelTrainer import ModelTrainPublish
from __app__.TrainingModule.TrainingConfig import TrainingConfig
from __app__.TrainingModule.Exceptions import *
def getUTCTime():
return datetime.now(timezone.utc)
def getLatency(startTime, endTime):
return (endTime-startTime).total_seconds()*1000
async def triggerTrainingMethod(data):
if not "productId" in data:
return ("Please provide a productId for training", 400)
productId = data["productId"]
exists = findProductId(productId)
if not exists:
return ('Data not available for productId {0}'.format(productId), 404)
    if 'trainingConfig' not in data:
return ("No config provided for training", 400)
trainingConfig = TrainingConfig(json.loads(data["trainingConfig"]))
if (trainingConfig.trainDetectors or trainingConfig.trainUtterances):
trainingId = str(uuid.uuid4())
try:
trainingHandler = ModelTrainPublish(trainingId, productId, trainingConfig)
msg = await trainingHandler.trainPublish()
if not msg:
msg = "Model Trained successfully"
return (f"{msg} for productId {productId} - trainingId {trainingId}", 200)
except Exception as e:
logHandler.error("Exception: {0}".format(str(e)))
return (str(e), 500)
else:
return ("Training flags are all set to false. Training not required", 400)
|
1659139
|
import contextlib
import os
import csv
import io
import boto
from skills_utils.s3 import split_s3_path
from skills_ml.datasets.onet_source import OnetToMemoryDownloader
from skills_ml.storage import InMemoryStore
class OnetCache(object):
"""
An object that downloads and caches ONET files from S3
"""
def __init__(self, s3_conn, s3_path, cache_dir):
"""
Args:
s3_conn: a boto s3 connection
s3_path: path to the onet directory
cache_dir: directory to cache files
"""
self.s3_conn = s3_conn
self.cache_dir = cache_dir
self.s3_path = s3_path
self.bucket_name, self.prefix = split_s3_path(self.s3_path)
@contextlib.contextmanager
def ensure_file(self, filename):
"""
Ensures that the given ONET data file is present, either by
using a cached copy or downloading from S3
Args:
filename: unpathed filename of an ONET file (Skills.txt)
Yields:
Full path to file on local filesystem
"""
full_path = os.path.join(self.cache_dir, filename)
if os.path.isfile(full_path):
yield full_path
else:
if not os.path.isdir(self.cache_dir):
os.mkdir(self.cache_dir)
bucket = self.s3_conn.get_bucket(self.bucket_name)
key = boto.s3.key.Key(
bucket=bucket,
name='{}/{}'.format(self.prefix, filename)
)
key.get_contents_to_filename(full_path)
yield full_path
class OnetSiteCache(object):
"""
An object that downloads files from the ONET site
"""
def __init__(self, storage=None):
"""
Args:
storage: Storage object to cache files
"""
self.storage = storage or InMemoryStore('')
self.downloader = OnetToMemoryDownloader()
def reader(self, filename):
"""
        Ensures that the given ONET data file is present, either by
        using a cached copy or downloading it from the ONET site
Args:
filename: unpathed filename of an ONET file (Skills)
Returns:
csv.DictReader
"""
if not self.storage.exists(filename):
self.storage.write(self.downloader.download(filename).encode('utf-8'), filename)
contents = self.storage.load(filename)
return csv.DictReader(io.StringIO(contents.decode('utf-8')), delimiter='\t')
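# A minimal usage sketch for the site cache above, commented out because it
# downloads from the ONET site; 'Skills' mirrors the filename in the docstring.
# cache = OnetSiteCache()
# for row in cache.reader('Skills'):
#     print(row)  # each row is a dict keyed by the tab-separated header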
|
1659168
|
import unittest
from mock import patch
from packages import directions_to, mapps
class TestDirectionsTo(unittest.TestCase):
"""
A test class that contains test cases for the main method of
directions_to.
"""
@patch.object(mapps, 'directions')
def test_directions_with_start_and_destination_city(self, mock_directions):
from_city = 'London'
to_city = 'Manchester'
data = "from {} to {}".format(from_city, to_city)
directions_to.main(data)
mock_directions.assert_called_once_with(to_city, from_city)
@patch.object(mapps, 'directions')
def test_directions_with_destination_and_start_city(self, mock_directions):
from_city = 'Madrid'
to_city = 'Valencia'
data = "to {} from {}".format(to_city, from_city)
directions_to.main(data)
mock_directions.assert_called_once_with(to_city, from_city)
@patch.object(mapps, 'directions')
def test_directions_with_only_destination_city(self, mock_directions):
from_city = 0
to_city = 'Paris'
data = "to {}".format(to_city)
directions_to.main(data)
mock_directions.assert_called_once_with(to_city, from_city)
if __name__ == '__main__':
unittest.main()
|
1659195
|
from django.forms import fields as django_forms_fields
from rest_framework.metadata import SimpleMetadata
from rest_framework.utils.field_mapping import ClassLookupDict
try:
from django_filters.rest_framework import DjangoFilterBackend
except ImportError:
DjangoFilterBackend = None
if DjangoFilterBackend is not None:
from django_filters import fields as django_filters_fields
FORM_FIELDS = {
django_forms_fields.NullBooleanField: 'boolean',
django_forms_fields.BooleanField: 'boolean',
django_forms_fields.URLField: 'url',
django_forms_fields.EmailField: 'email',
django_forms_fields.RegexField: 'regex',
django_forms_fields.SlugField: 'slug',
django_forms_fields.IntegerField: 'integer',
django_forms_fields.FloatField: 'float',
django_forms_fields.DecimalField: 'decimal',
django_forms_fields.DateField: 'date',
django_forms_fields.DateTimeField: 'datetime',
django_forms_fields.TimeField: 'time',
django_forms_fields.ChoiceField: 'choice',
django_filters_fields.ChoiceField: 'choice',
django_forms_fields.MultipleChoiceField: 'multiple choice',
django_filters_fields.MultipleChoiceField: 'multiple choice',
django_forms_fields.FileField: 'file upload',
django_forms_fields.FilePathField: 'file upload',
django_forms_fields.ImageField: 'image upload',
django_filters_fields.ModelMultipleChoiceField: 'list',
django_filters_fields.ModelChoiceField: 'nested object',
}
else:
FORM_FIELDS = {}
class AngularDjangoMetadata(SimpleMetadata):
label_form_lookup = ClassLookupDict(FORM_FIELDS)
def determine_metadata(self, request, view):
metadata = super().determine_metadata(request, view)
filters = {}
for filter_backend in view.filter_backends:
backend = filter_backend()
if DjangoFilterBackend is not None and isinstance(backend, DjangoFilterBackend):
fields = backend.get_filterset_class(view)().form.fields
else:
fields = {}
if hasattr(backend, 'get_schema_operation_parameters'):
for f in backend.get_schema_operation_parameters(view):
f = dict(f)
field = fields.get(f['name'])
try:
f['type'] = self.label_form_lookup[field]
except KeyError:
pass
if field:
f['label'] = field.label
if field and f.get('type') == 'choice':
f['choices'] = [{'value': choice[0], 'display_name': choice[1]} for choice in field.choices]
filters[f['name']] = f
metadata['filters'] = filters
return metadata
|
1659197
|
from http.server import HTTPServer, BaseHTTPRequestHandler
from concurrent.futures.thread import ThreadPoolExecutor
import json
from lambda_log_shipper.logs_manager import LogsManager
from lambda_log_shipper.utils import (
LOG_SUBSCRIBER_PORT,
HEADERS_ID_KEY,
lambda_service,
get_logger,
never_fail,
)
LOG_SUBSCRIPTION_REQUEST = {
"destination": {
"protocol": "HTTP",
"URI": f"http://sandbox:{LOG_SUBSCRIBER_PORT}",
},
"types": ["platform", "function"],
}
TIMEOUT_SAFETY_GAP = 0.5
def subscribe_to_logs(extension_id):
server = HTTPServer(("0.0.0.0", LOG_SUBSCRIBER_PORT), LogsHttpRequestHandler)
server.server_activate()
ThreadPoolExecutor().submit(server.serve_forever)
body = json.dumps(LOG_SUBSCRIPTION_REQUEST)
conn = lambda_service()
conn.request(
"PUT", "/2020-08-15/logs", body, headers={HEADERS_ID_KEY: extension_id}
)
class LogsHttpRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
with never_fail("parse logs event"):
size = int(self.headers.get("Content-Length", "0"))
records = json.loads(self.rfile.read(size))
get_logger().info(records)
LogsManager.get_manager().add_records(records)
self.send_response(200)
self.end_headers()
def log_message(self, *args):
# Do not write console logs per request
return
|
1659206
|
import time
import os.path as osp
import itertools
import argparse
import wget
import torch
from scipy.io import loadmat
from torch_scatter import scatter_add
from torch_sparse.tensor import SparseTensor
short_rows = [
('DIMACS10', 'citationCiteseer'),
('SNAP', 'web-Stanford'),
]
long_rows = [
('Janna', 'StocF-1465'),
('GHS_psdef', 'ldoor'),
]
def download(dataset):
url = 'https://sparse.tamu.edu/mat/{}/{}.mat'
for group, name in itertools.chain(long_rows, short_rows):
if not osp.exists(f'{name}.mat'):
print(f'Downloading {group}/{name}:')
wget.download(url.format(group, name))
print('')
def bold(text, flag=True):
return f'\033[1m{text}\033[0m' if flag else text
@torch.no_grad()
def correctness(dataset):
group, name = dataset
mat_scipy = loadmat(f'{name}.mat')['Problem'][0][0][2].tocsr()
row = torch.from_numpy(mat_scipy.tocoo().row).to(args.device, torch.long)
col = torch.from_numpy(mat_scipy.tocoo().col).to(args.device, torch.long)
mat = SparseTensor(row=row, col=col, sparse_sizes=mat_scipy.shape)
mat.fill_cache_()
mat_pytorch = mat.to_torch_sparse_coo_tensor().coalesce()
for size in sizes:
try:
x = torch.randn((mat.size(1), size), device=args.device)
out1 = mat @ x
out2 = mat_pytorch @ x
assert torch.allclose(out1, out2, atol=1e-4)
except RuntimeError as e:
if 'out of memory' not in str(e):
raise RuntimeError(e)
torch.cuda.empty_cache()
def time_func(func, x):
try:
if torch.cuda.is_available():
torch.cuda.synchronize()
t = time.perf_counter()
if not args.with_backward:
with torch.no_grad():
for _ in range(iters):
func(x)
else:
x = x.requires_grad_()
for _ in range(iters):
out = func(x)
out = out[0] if isinstance(out, tuple) else out
torch.autograd.grad(out, x, out, only_inputs=True)
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.perf_counter() - t
except RuntimeError as e:
if 'out of memory' not in str(e):
raise RuntimeError(e)
torch.cuda.empty_cache()
return float('inf')
def timing(dataset):
group, name = dataset
mat_scipy = loadmat(f'{name}.mat')['Problem'][0][0][2].tocsr()
row = torch.from_numpy(mat_scipy.tocoo().row).to(args.device, torch.long)
col = torch.from_numpy(mat_scipy.tocoo().col).to(args.device, torch.long)
mat = SparseTensor(row=row, col=col, sparse_sizes=mat_scipy.shape)
mat.fill_cache_()
mat_pytorch = mat.to_torch_sparse_coo_tensor().coalesce()
mat_scipy = mat.to_scipy(layout='csr')
def scatter(x):
return scatter_add(x[col], row, dim=0, dim_size=mat_scipy.shape[0])
def spmm_scipy(x):
if x.is_cuda:
raise RuntimeError('out of memory')
return mat_scipy @ x
def spmm_pytorch(x):
return mat_pytorch @ x
def spmm(x):
return mat @ x
t1, t2, t3, t4 = [], [], [], []
for size in sizes:
try:
x = torch.randn((mat.size(1), size), device=args.device)
t1 += [time_func(scatter, x)]
t2 += [time_func(spmm_scipy, x)]
t3 += [time_func(spmm_pytorch, x)]
t4 += [time_func(spmm, x)]
del x
except RuntimeError as e:
if 'out of memory' not in str(e):
raise RuntimeError(e)
torch.cuda.empty_cache()
for t in (t1, t2, t3, t4):
t.append(float('inf'))
ts = torch.tensor([t1, t2, t3, t4])
winner = torch.zeros_like(ts, dtype=torch.bool)
winner[ts.argmin(dim=0), torch.arange(len(sizes))] = 1
winner = winner.tolist()
name = f'{group}/{name}'
print(f'{bold(name)} (avg row length: {mat.avg_row_length():.2f}):')
print('\t'.join([' '] + [f'{size:>5}' for size in sizes]))
print('\t'.join([bold('Scatter ')] +
[bold(f'{t:.5f}', f) for t, f in zip(t1, winner[0])]))
print('\t'.join([bold('SPMM SciPy ')] +
[bold(f'{t:.5f}', f) for t, f in zip(t2, winner[1])]))
print('\t'.join([bold('SPMM PyTorch')] +
[bold(f'{t:.5f}', f) for t, f in zip(t3, winner[2])]))
print('\t'.join([bold('SPMM Own ')] +
[bold(f'{t:.5f}', f) for t, f in zip(t4, winner[3])]))
print()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--with_backward', action='store_true')
parser.add_argument('--device', type=str, default='cuda')
args = parser.parse_args()
iters = 1 if args.device == 'cpu' else 20
sizes = [1, 16, 32, 64, 128, 256, 512]
sizes = sizes[:4] if args.device == 'cpu' else sizes
for _ in range(10): # Warmup.
torch.randn(100, 100, device=args.device).sum()
for dataset in itertools.chain(short_rows, long_rows):
download(dataset)
correctness(dataset)
timing(dataset)
|
1659208
|
from __future__ import absolute_import
from .serializable import Serializable
class KeyPair(Serializable):
"""
The interface of a key pair. A key pair is a pair consisting of a private and a public
    key used for encryption and decryption.
"""
def __init__(self, priv = None, pub = None):
"""
Initiate a KeyPair instance using the key information provided as parameters.
:param priv: The private key as a bytes-like object or None.
:param pub: The public key as a bytes-like object or None.
"""
raise NotImplementedError
@classmethod
def generate(cls):
"""
:returns: A new key pair with private and public key set.
"""
raise NotImplementedError
@property
def priv(self):
"""
:returns: A bytes-like object encoding the private key of this key pair instance.
"""
raise NotImplementedError
@property
def pub(self):
"""
:returns: A bytes-like object encoding the public key of this key pair instance.
"""
raise NotImplementedError
def encrypt(self, data, other):
"""
Encrypt given data using the private key stored by this KeyPair instance, for the
public key stored by the other instance.
:param data: The data to encrypt. A bytes-like object.
:param other: An instance of the KeyPair class. The public key to encrypt the data
for.
:returns: The encrypted data.
:raises MissingKeyException: If any key is missing to complete this operation. The
exception message will contain (human-readable) details.
"""
raise NotImplementedError
def decrypt(self, data, other):
"""
Decrypt the encrypted data using the private key stored by this KeyPair instance,
for the public key stored by the other instance.
:param data: The data to decrypt. A bytes-like object.
:param other: An instance of the KeyPair class. The public key to decrypt the data
from.
:returns: The decrypted plain data.
:raises MissingKeyException: If any key is missing to complete this operation. The
exception message will contain (human-readable) details.
"""
raise NotImplementedError
def getSharedSecret(self, other):
"""
Get a shared secret between the keys stored by this instance and the keys stored
by the other instance.
The shared secrets are generated, so that following equation is True: ::
shared_secret(A.priv, B.pub) == shared_secret(B.priv, A.pub)
:param other: An instance of the KeyPair class.
:returns: The shared secret, as a bytes-like object.
:raises MissingKeyException: If any key is missing to complete this operation. The
exception message will contain (human-readable) details.
"""
raise NotImplementedError
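# The intended call pattern for a concrete subclass (hypothetical `SomeKeyPair`),
# derived from the docstrings above and left commented out:
# alice, bob = SomeKeyPair.generate(), SomeKeyPair.generate()
# ciphertext = alice.encrypt(b'hello', bob)
# assert bob.decrypt(ciphertext, alice) == b'hello'
# assert alice.getSharedSecret(bob) == bob.getSharedSecret(alice)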
|
1659237
|
import factory
from wagtail.core.models import PageViewRestriction, BaseViewRestriction
from wagtail_factories import PageFactory
class PageViewRestrictionFactory(factory.django.DjangoModelFactory):
page = factory.SubFactory(PageFactory)
restriction_type = BaseViewRestriction.PASSWORD
class Meta:
model = PageViewRestriction
|
1659250
|
import pickle
import unittest
import numpy as np
from softlearning.replay_pools.trajectory_replay_pool import (
TrajectoryReplayPool)
def create_pool(max_size=100):
return TrajectoryReplayPool(
observation_space=None,
action_space=None,
max_size=max_size,
)
def verify_pools_match(pool1, pool2):
for key in pool2.__dict__:
if key == '_trajectories':
pool1_trajectories = pool1.__dict__[key]
pool2_trajectories = pool2.__dict__[key]
for pool1_trajectory, pool2_trajectory in (
zip(pool1_trajectories, pool2_trajectories)):
assert pool1_trajectory.keys() == pool2_trajectory.keys()
for field_name in pool1_trajectory.keys():
np.testing.assert_array_equal(
pool1_trajectory[field_name],
pool2_trajectory[field_name],
f"key '{key}', field_name '{field_name}' doesn't match"
)
else:
np.testing.assert_array_equal(
pool1.__dict__[key],
pool2.__dict__[key],
f"key '{key}' doesn't match")
class TrajectoryReplayPoolTest(unittest.TestCase):
def setUp(self):
self.pool = create_pool(10)
def test_save_load_latest_experience(self):
self.assertEqual(self.pool._trajectories_since_save, 0)
num_trajectories_per_save = self.pool._max_size // 2
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories_per_save)
]
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.num_trajectories, num_trajectories_per_save)
self.assertEqual(self.pool.size,
num_trajectories_per_save * trajectory_length)
self.assertEqual(self.pool._trajectories_since_save,
num_trajectories_per_save)
self.pool.save_latest_experience('./tmp/pool_1.pkl')
self.assertEqual(self.pool._trajectories_since_save, 0)
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.size,
self.pool._max_size * trajectory_length)
self.assertEqual(self.pool._trajectories_since_save,
num_trajectories_per_save)
self.pool.save_latest_experience('./tmp/pool_2.pkl')
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.size,
self.pool._max_size * trajectory_length)
self.assertEqual(self.pool._trajectories_since_save,
num_trajectories_per_save)
self.pool.save_latest_experience('./tmp/pool_3.pkl')
pool = create_pool(self.pool._max_size)
self.assertEqual(pool.size, 0)
pool.load_experience('./tmp/pool_1.pkl')
self.assertEqual(pool.num_trajectories, self.pool._max_size // 2)
self.assertEqual(pool.size,
(self.pool._max_size // 2) * trajectory_length)
pool.load_experience('./tmp/pool_2.pkl')
self.assertEqual(pool.num_trajectories, self.pool._max_size)
self.assertEqual(pool.size,
(self.pool._max_size) * trajectory_length)
self.assertEqual(pool.size, self.pool.size)
pool.load_experience('./tmp/pool_3.pkl')
self.assertEqual(pool.size, self.pool.size)
self.assertEqual(pool.size,
(self.pool._max_size) * trajectory_length)
for trajectory1, trajectory2 in zip(
pool._trajectories, self.pool._trajectories):
self.assertEqual(trajectory1.keys(), trajectory2.keys())
for key in trajectory1:
np.testing.assert_array_equal(trajectory1[key], trajectory2[key])
def test_save_load_latest_experience_empty_pool(self):
self.assertEqual(self.pool._trajectories_since_save, 0)
self.pool.save_latest_experience('./tmp/pool_1.pkl')
pool = create_pool(self.pool._max_size)
pool.load_experience('./tmp/pool_1.pkl')
self.assertEqual(pool.size, 0)
def test_save_latest_experience_with_overflown_pool(self):
self.assertEqual(self.pool._trajectories_since_save, 0)
num_trajectories = self.pool._max_size + 2
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.num_trajectories, self.pool._max_size)
self.assertEqual(self.pool._trajectories_since_save,
self.pool._max_size + 2)
self.pool.save_latest_experience('./tmp/pool_1.pkl')
pool = create_pool(self.pool._max_size)
self.assertEqual(pool.size, 0)
import gzip
with gzip.open('./tmp/pool_1.pkl', 'rb') as f:
latest_trajectories = pickle.load(f)
self.assertEqual(len(latest_trajectories), self.pool._max_size)
pool.load_experience('./tmp/pool_1.pkl')
self.assertEqual(pool.size,
self.pool._max_size * trajectory_length)
for trajectory1, trajectory2 in zip(
trajectories, self.pool._trajectories):
self.assertEqual(trajectory1.keys(), trajectory2.keys())
for field_name in trajectory1:
np.testing.assert_array_equal(
trajectory1[field_name], trajectory2[field_name])
def test_serialize_deserialize_full(self):
# Fill fields with random data
num_trajectories = self.pool._max_size + 2
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.num_trajectories, self.pool._max_size)
self.assertEqual(self.pool.size,
trajectory_length * self.pool._max_size)
serialized = pickle.dumps(self.pool)
deserialized = pickle.loads(serialized)
verify_pools_match(self.pool, deserialized)
self.assertNotEqual(id(self.pool), id(deserialized))
self.assertEqual(deserialized.num_trajectories, self.pool._max_size)
self.assertEqual(deserialized.size,
trajectory_length * self.pool._max_size)
def test_serialize_deserialize_not_full(self):
# Fill fields with random data
num_trajectories = self.pool._max_size - 2
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.num_trajectories, num_trajectories)
self.assertEqual(self.pool.size,
num_trajectories * trajectory_length)
serialized = pickle.dumps(self.pool)
deserialized = pickle.loads(serialized)
verify_pools_match(self.pool, deserialized)
self.assertNotEqual(id(self.pool), id(deserialized))
self.assertEqual(deserialized.num_trajectories, num_trajectories)
self.assertEqual(deserialized.size,
num_trajectories * trajectory_length)
def test_serialize_deserialize_empty(self):
# Fill fields with random data
self.assertEqual(self.pool.num_trajectories, 0)
self.assertEqual(self.pool.size, 0)
serialized = pickle.dumps(self.pool)
deserialized = pickle.loads(serialized)
verify_pools_match(self.pool, deserialized)
self.assertNotEqual(id(self.pool), id(deserialized))
self.assertEqual(deserialized.num_trajectories, 0)
self.assertEqual(deserialized.size, 0)
def test_add_path(self):
for value in range(self.pool._max_size):
path = {
'field1': np.array([value]),
'field2': np.array([-value*2]),
}
self.pool.add_path(path)
self.assertEqual(len(self.pool._trajectories), self.pool._max_size)
for i, trajectory in enumerate(self.pool._trajectories):
np.testing.assert_array_equal(trajectory['field1'], [i])
np.testing.assert_array_equal(trajectory['field2'], [-i * 2])
def test_add_paths(self):
num_trajectories = 4
path_length = 10
paths = [
{
'field1': np.arange(path_length)[:, None],
'field2': -np.arange(path_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(paths)
self.assertEqual(self.pool.num_trajectories, num_trajectories)
self.assertEqual(self.pool.size, num_trajectories * path_length)
for trajectory in self.pool._trajectories:
np.testing.assert_array_equal(
trajectory['field1'],
np.arange(path_length)[:, None])
np.testing.assert_array_equal(
trajectory['field2'],
-np.arange(path_length)[:, None] * 2)
def test_random_batch(self):
empty_pool_batch = self.pool.random_batch(4)
self.assertFalse(empty_pool_batch)
num_trajectories = 4
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
full_pool_batch = self.pool.random_batch(4)
for key, values in full_pool_batch.items():
self.assertEqual(values.shape, (4, 1))
self.assertTrue(np.all(full_pool_batch['field1'] >= 0))
self.assertTrue(np.all(full_pool_batch['field2'] % 2 == 0))
self.assertTrue(np.all(full_pool_batch['field2'] <= 0))
def test_random_batch_with_variable_length_trajectories(self):
batch_size = 256
num_trajectories = 20
trajectories = [
{
'field1': np.arange(np.random.randint(50, 1000))[:, None],
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
batch = self.pool.random_batch(batch_size)
for key, values in batch.items():
self.assertEqual(values.shape, (batch_size, 1))
def test_last_n_batch(self):
empty_pool_batch = self.pool.last_n_batch(4)
self.assertFalse(empty_pool_batch)
num_trajectories = 4
trajectory_length = 10
trajectories = [
{
'field1': i * np.arange(trajectory_length)[:, None],
'field2': -i * np.arange(trajectory_length)[:, None] * 2,
}
for i in range(num_trajectories)
]
self.pool.add_paths(trajectories)
full_pool_batch = self.pool.last_n_batch(int(trajectory_length * 2.5))
for key, values in full_pool_batch.items():
expected = np.concatenate((
trajectories[-3][key][trajectory_length // 2:],
trajectories[-2][key],
trajectories[-1][key]
))
np.testing.assert_array_equal(values, expected)
self.assertEqual(values.shape, (2.5 * trajectory_length, 1))
self.pool.add_paths(trajectories)
full_pool_batch = self.pool.last_n_batch(int(trajectory_length * 2))
for key, values in full_pool_batch.items():
expected = np.concatenate((
trajectories[-2][key],
trajectories[-1][key]
))
np.testing.assert_array_equal(values, expected)
self.assertEqual(values.shape, (2 * trajectory_length, 1))
def test_last_n_batch_with_overflown_pool(self):
num_trajectories = self.pool._max_size + 2
trajectory_length = 10
trajectories = [
{
'field1': i * np.arange(trajectory_length)[:, None],
'field2': -i * np.arange(trajectory_length)[:, None] * 2,
}
for i in range(num_trajectories)
]
self.pool.add_paths(trajectories)
full_pool_batch = self.pool.last_n_batch(int(trajectory_length * 2.5))
for key, values in full_pool_batch.items():
expected = np.concatenate((
trajectories[-3][key][trajectory_length // 2:],
trajectories[-2][key],
trajectories[-1][key]
))
np.testing.assert_array_equal(values, expected)
self.assertEqual(values.shape, (2.5 * trajectory_length, 1))
def test_batch_by_indices(self):
with self.assertRaises(TypeError):
self.pool.batch_by_indices(np.array([-1, 2, 4]))
num_trajectories = 4
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
batch = self.pool.batch_by_indices(
np.repeat(np.arange(num_trajectories), trajectory_length),
np.tile(np.flip(np.arange(trajectory_length)), num_trajectories))
for field_name, values in batch.items():
field_expected = np.concatenate([
np.flip(trajectory[field_name]) for trajectory in trajectories])
np.testing.assert_array_equal(
batch[field_name],
field_expected)
if __name__ == '__main__':
unittest.main()
|
1659293
|
import argparse
import importlib
import os
import pkgutil
import shutil
from inspect import getmembers, isfunction
from string import Template
import igibson
from igibson import examples
from igibson.utils.assets_utils import download_assets
download_assets()
def main(exhaustive=False):
examples_list = []
num_first_options = {}
for package in pkgutil.walk_packages(examples.__path__, examples.__name__ + "."):
if (
not package.ispkg
and package.name[17:] != "example_selector"
and "web_ui" not in package.name[17:] # The WebUI examples require additional server setup
and "vr_" not in package.name[17:] # The VR examples require additional dependencies
and "ray_" not in package.name[17:] # The Ray/RLLib example does not run in a subprocess
): # Consider removing the last condition if we have runnable VR tests
examples_list += [package.name[17:]]
# Import the module and get the number of first options, if there is a function for it
# We use that to create the exhaustive examples iterating over the options in the first selection point
i = importlib.import_module(package.name)
if "get_first_options" in [name for (name, element) in getmembers(i)]:
num_first_options[package.name[17:]] = len(i.get_first_options())
else:
num_first_options[package.name[17:]] = 0
temp_folder_of_test = os.path.join("/", "tmp", "tests_of_examples")
shutil.rmtree(temp_folder_of_test, ignore_errors=True)
os.makedirs(temp_folder_of_test, exist_ok=True)
for example in examples_list:
if (
num_first_options[example] == 0 or not exhaustive
): # If we do not indicate an exhaustive test, we use random selection for all tests
template_file_name = os.path.join(igibson.__path__[0], "..", "tests", "test_of_example_template.txt")
with open(template_file_name, "r") as f:
substitutes = dict()
substitutes["module"] = example
name = example.rsplit(".", 1)[-1]
substitutes["name"] = name
substitutes["selection"] = '"random"'
src = Template(f.read())
dst = src.substitute(substitutes)
filename = os.path.join(temp_folder_of_test, name + "_test.py")
test_file = open(filename, "w")
print("Writing {}".format(filename))
n = test_file.write(dst)
test_file.close()
else:
for selection_option in range(1, num_first_options[example] + 1):
template_file_name = os.path.join(igibson.__path__[0], "..", "tests", "test_of_example_template.txt")
with open(template_file_name, "r") as f:
substitutes = dict()
substitutes["module"] = example
name = example.rsplit(".", 1)[-1] + "_{}".format(selection_option)
substitutes["name"] = name
substitutes["selection"] = selection_option
src = Template(f.read())
dst = src.substitute(substitutes)
filename = os.path.join(temp_folder_of_test, name + "_test.py")
test_file = open(filename, "w")
print("Writing {}".format(filename))
n = test_file.write(dst)
test_file.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create test files for the examples")
parser.add_argument(
"-e",
"--exhaustive",
action="store_true",
help="Whether to test all options in the first decision level or select randomly.",
)
args = parser.parse_args()
main(args.exhaustive)
|
1659320
|
from gluon.utils import web2py_uuid
cookie_key = cache.ram('cookie_key',lambda: web2py_uuid(),None)
session.connect(request,response,cookie_key=cookie_key)
|
1659324
|
from canvas_sdk import client, utils
def list_features_courses(request_ctx, course_id, per_page=None, **request_kwargs):
"""
List all features that apply to a given Account, Course, or User.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List features
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/courses/{course_id}/features'
payload = {
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(course_id=course_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
def list_features_accounts(request_ctx, account_id, per_page=None, **request_kwargs):
"""
List all features that apply to a given Account, Course, or User.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List features
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/accounts/{account_id}/features'
payload = {
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
def list_features_users(request_ctx, user_id, per_page=None, **request_kwargs):
"""
List all features that apply to a given Account, Course, or User.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param user_id: (required) ID
:type user_id: string
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List features
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/users/{user_id}/features'
payload = {
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(user_id=user_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
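# A hypothetical call sketch; `ctx` is assumed to be an already-configured
# RequestContext from this SDK.
# response = list_features_courses(ctx, course_id='123', per_page=50)
# features = response.json()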
def list_enabled_features_courses(request_ctx, course_id, **request_kwargs):
"""
List all features that are enabled on a given Account, Course, or User.
Only the feature names are returned.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:return: List enabled features
:rtype: requests.Response (with void data)
"""
path = '/v1/courses/{course_id}/features/enabled'
url = request_ctx.base_api_url + path.format(course_id=course_id)
response = client.get(request_ctx, url, **request_kwargs)
return response
def list_enabled_features_accounts(request_ctx, account_id, **request_kwargs):
"""
List all features that are enabled on a given Account, Course, or User.
Only the feature names are returned.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:return: List enabled features
:rtype: requests.Response (with void data)
"""
path = '/v1/accounts/{account_id}/features/enabled'
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.get(request_ctx, url, **request_kwargs)
return response
def list_enabled_features_users(request_ctx, user_id, **request_kwargs):
"""
List all features that are enabled on a given Account, Course, or User.
Only the feature names are returned.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param user_id: (required) ID
:type user_id: string
:return: List enabled features
:rtype: requests.Response (with void data)
"""
path = '/v1/users/{user_id}/features/enabled'
url = request_ctx.base_api_url + path.format(user_id=user_id)
response = client.get(request_ctx, url, **request_kwargs)
return response
def get_feature_flag_courses(request_ctx, course_id, feature, **request_kwargs):
"""
Get the feature flag that applies to a given Account, Course, or User.
The flag may be defined on the object, or it may be inherited from a parent
account. You can look at the context_id and context_type of the returned object
to determine which is the case. If these fields are missing, then the object
is the global Canvas default.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param feature: (required) ID
:type feature: string
:return: Get feature flag
:rtype: requests.Response (with FeatureFlag data)
"""
path = '/v1/courses/{course_id}/features/flags/{feature}'
url = request_ctx.base_api_url + path.format(course_id=course_id, feature=feature)
response = client.get(request_ctx, url, **request_kwargs)
return response
def get_feature_flag_accounts(request_ctx, account_id, feature, **request_kwargs):
"""
Get the feature flag that applies to a given Account, Course, or User.
The flag may be defined on the object, or it may be inherited from a parent
account. You can look at the context_id and context_type of the returned object
to determine which is the case. If these fields are missing, then the object
is the global Canvas default.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param feature: (required) ID
:type feature: string
:return: Get feature flag
:rtype: requests.Response (with FeatureFlag data)
"""
path = '/v1/accounts/{account_id}/features/flags/{feature}'
url = request_ctx.base_api_url + path.format(account_id=account_id, feature=feature)
response = client.get(request_ctx, url, **request_kwargs)
return response
def get_feature_flag_users(request_ctx, user_id, feature, **request_kwargs):
"""
Get the feature flag that applies to a given Account, Course, or User.
The flag may be defined on the object, or it may be inherited from a parent
account. You can look at the context_id and context_type of the returned object
to determine which is the case. If these fields are missing, then the object
is the global Canvas default.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param user_id: (required) ID
:type user_id: string
:param feature: (required) ID
:type feature: string
:return: Get feature flag
:rtype: requests.Response (with FeatureFlag data)
"""
path = '/v1/users/{user_id}/features/flags/{feature}'
url = request_ctx.base_api_url + path.format(user_id=user_id, feature=feature)
response = client.get(request_ctx, url, **request_kwargs)
return response
def set_feature_flag_courses(request_ctx, course_id, feature, state=None, locking_account_id=None, **request_kwargs):
"""
Set a feature flag for a given Account, Course, or User. This call will fail if a parent account sets
a feature flag for the same feature in any state other than "allowed".
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param feature: (required) ID
:type feature: string
:param state: (optional) "off":: The feature is not available for the course, user, or account and sub-accounts. "allowed":: (valid only on accounts) The feature is off in the account, but may be enabled in sub-accounts and courses by setting a feature flag on the sub-account or course. "on":: The feature is turned on unconditionally for the user, course, or account and sub-accounts.
:type state: string or None
:param locking_account_id: (optional) If set, this FeatureFlag may only be modified by someone with administrative rights in the specified account. The locking account must be above the target object in the account chain.
:type locking_account_id: integer or None
:return: Set feature flag
:rtype: requests.Response (with FeatureFlag data)
"""
state_types = ('off', 'allowed', 'on')
utils.validate_attr_is_acceptable(state, state_types)
path = '/v1/courses/{course_id}/features/flags/{feature}'
payload = {
'state' : state,
'locking_account_id' : locking_account_id,
}
url = request_ctx.base_api_url + path.format(course_id=course_id, feature=feature)
response = client.put(request_ctx, url, payload=payload, **request_kwargs)
return response
def set_feature_flag_accounts(request_ctx, account_id, feature, state=None, locking_account_id=None, **request_kwargs):
"""
Set a feature flag for a given Account, Course, or User. This call will fail if a parent account sets
a feature flag for the same feature in any state other than "allowed".
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param feature: (required) ID
:type feature: string
:param state: (optional) "off":: The feature is not available for the course, user, or account and sub-accounts. "allowed":: (valid only on accounts) The feature is off in the account, but may be enabled in sub-accounts and courses by setting a feature flag on the sub-account or course. "on":: The feature is turned on unconditionally for the user, course, or account and sub-accounts.
:type state: string or None
:param locking_account_id: (optional) If set, this FeatureFlag may only be modified by someone with administrative rights in the specified account. The locking account must be above the target object in the account chain.
:type locking_account_id: integer or None
:return: Set feature flag
:rtype: requests.Response (with FeatureFlag data)
"""
state_types = ('off', 'allowed', 'on')
utils.validate_attr_is_acceptable(state, state_types)
path = '/v1/accounts/{account_id}/features/flags/{feature}'
payload = {
'state' : state,
'locking_account_id' : locking_account_id,
}
url = request_ctx.base_api_url + path.format(account_id=account_id, feature=feature)
response = client.put(request_ctx, url, payload=payload, **request_kwargs)
return response
def set_feature_flag_users(request_ctx, user_id, feature, state=None, locking_account_id=None, **request_kwargs):
"""
Set a feature flag for a given Account, Course, or User. This call will fail if a parent account sets
a feature flag for the same feature in any state other than "allowed".
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param user_id: (required) ID
:type user_id: string
:param feature: (required) ID
:type feature: string
:param state: (optional) "off":: The feature is not available for the course, user, or account and sub-accounts. "allowed":: (valid only on accounts) The feature is off in the account, but may be enabled in sub-accounts and courses by setting a feature flag on the sub-account or course. "on":: The feature is turned on unconditionally for the user, course, or account and sub-accounts.
:type state: string or None
:param locking_account_id: (optional) If set, this FeatureFlag may only be modified by someone with administrative rights in the specified account. The locking account must be above the target object in the account chain.
:type locking_account_id: integer or None
:return: Set feature flag
:rtype: requests.Response (with FeatureFlag data)
"""
state_types = ('off', 'allowed', 'on')
utils.validate_attr_is_acceptable(state, state_types)
path = '/v1/users/{user_id}/features/flags/{feature}'
payload = {
'state' : state,
'locking_account_id' : locking_account_id,
}
url = request_ctx.base_api_url + path.format(user_id=user_id, feature=feature)
response = client.put(request_ctx, url, payload=payload, **request_kwargs)
return response
def remove_feature_flag_courses(request_ctx, course_id, feature, **request_kwargs):
"""
Remove feature flag for a given Account, Course, or User. (Note that the flag must
be defined on the Account, Course, or User directly.) The object will then inherit
the feature flags from a higher account, if any exist. If this flag was 'on' or 'off',
then lower-level account flags that were masked by this one will apply again.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param feature: (required) ID
:type feature: string
:return: Remove feature flag
:rtype: requests.Response (with FeatureFlag data)
"""
path = '/v1/courses/{course_id}/features/flags/{feature}'
url = request_ctx.base_api_url + path.format(course_id=course_id, feature=feature)
response = client.delete(request_ctx, url, **request_kwargs)
return response
def remove_feature_flag_accounts(request_ctx, account_id, feature, **request_kwargs):
"""
Remove feature flag for a given Account, Course, or User. (Note that the flag must
be defined on the Account, Course, or User directly.) The object will then inherit
the feature flags from a higher account, if any exist. If this flag was 'on' or 'off',
then lower-level account flags that were masked by this one will apply again.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param feature: (required) ID
:type feature: string
:return: Remove feature flag
:rtype: requests.Response (with FeatureFlag data)
"""
path = '/v1/accounts/{account_id}/features/flags/{feature}'
url = request_ctx.base_api_url + path.format(account_id=account_id, feature=feature)
response = client.delete(request_ctx, url, **request_kwargs)
return response
def remove_feature_flag_users(request_ctx, user_id, feature, **request_kwargs):
"""
Remove feature flag for a given Account, Course, or User. (Note that the flag must
be defined on the Account, Course, or User directly.) The object will then inherit
the feature flags from a higher account, if any exist. If this flag was 'on' or 'off',
then lower-level account flags that were masked by this one will apply again.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param user_id: (required) ID
:type user_id: string
:param feature: (required) ID
:type feature: string
:return: Remove feature flag
:rtype: requests.Response (with FeatureFlag data)
"""
path = '/v1/users/{user_id}/features/flags/{feature}'
url = request_ctx.base_api_url + path.format(user_id=user_id, feature=feature)
response = client.delete(request_ctx, url, **request_kwargs)
return response
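# Illustrative usage sketch (not part of canvas_sdk itself). It assumes a
# RequestContext configured elsewhere; the course id and feature name below
# are placeholders.
#
#   ctx = ...  # a configured canvas_sdk RequestContext
#   features = list_features_courses(ctx, '123').json()
#   set_feature_flag_courses(ctx, '123', 'new_gradebook', state='on')
#   flag = get_feature_flag_courses(ctx, '123', 'new_gradebook').json()
#   remove_feature_flag_courses(ctx, '123', 'new_gradebook')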
|
1659342
|
from urllib.parse import unquote
from django.core import mail
from django.test import TestCase
from testapp.models import Subscription
from testapp.urls import TestModelBackend
class MockRequest:
GET = {}
POST = {}
class SubscriptionTest(TestCase):
def test_model(self):
model = Subscription.objects.create(email="<EMAIL>")
self.assertEqual(str(model), "<EMAIL>")
def test_subscription(self):
self.assertContains(self.client.get("/newsletter/"), 'id="id_email"', 1)
response = self.client.post(
"/newsletter/",
{
"email": "",
"action": "subscribe",
},
)
self.assertEqual(response.status_code, 200)
response = self.client.post(
"/newsletter/",
{
"email": "<EMAIL>",
"action": "subscribe",
},
)
self.assertRedirects(response, "/newsletter/")
self.assertEqual(len(mail.outbox), 1)
body = mail.outbox[0].body
subscribe_url = unquote(
[line for line in body.splitlines() if "testserver" in line][0]
)
self.assertEqual(Subscription.objects.count(), 0)
response = self.client.get(subscribe_url)
self.assertEqual(Subscription.objects.filter(is_active=True).count(), 1)
self.assertContains(
self.client.get(
subscribe_url.replace("com:", "ch:"),
follow=True,
),
"We are sorry. This link is broken.",
status_code=200,
)
self.assertContains(response, 'id="id_full_name"')
response = self.client.post(
subscribe_url,
{
"full_name": "",
},
)
self.assertContains(response, "This field is required.", 1)
response = self.client.post(
subscribe_url,
{
"full_name": "<NAME>",
},
)
self.assertRedirects(
response,
subscribe_url.replace("http://testserver", ""),
)
subscription = Subscription.objects.get()
self.assertTrue(subscription.is_active)
self.assertEqual(subscription.email, "<EMAIL>")
self.assertEqual(subscription.full_name, "<NAME>")
self.assertContains(
self.client.post(
"/newsletter/",
{
"email": "<EMAIL>",
"action": "subscribe",
},
),
"This address is already subscribed to our newsletter.",
)
self.assertContains(
self.client.post(
"/newsletter/",
{
"email": "<EMAIL>",
"action": "unsubscribe",
},
follow=True,
),
"You have been unsubscribed.",
)
self.assertEqual(Subscription.objects.filter(is_active=True).count(), 0)
self.assertContains(
self.client.post(
"/newsletter/",
{
"email": "<EMAIL>",
"action": "unsubscribe",
},
follow=False,
),
"This address is not subscribed to our newsletter.",
)
self.assertEqual(len(mail.outbox), 2)
body = mail.outbox[1].body
resubscribe_url = unquote(
[line for line in body.splitlines() if "testserver" in line][0]
)
self.assertContains(
self.client.get(resubscribe_url, follow=True),
"Your subscription has been activated.",
)
self.assertEqual(Subscription.objects.filter(is_active=True).count(), 1)
self.assertContains(
self.client.get(resubscribe_url, follow=True),
"Your subscription is already active.",
)
self.assertContains(
self.client.get(
# Purposefully break the link.
resubscribe_url.replace("com:", "ch:"),
follow=True,
),
"We are sorry. This link is broken.",
status_code=200,
)
def test_backend(self):
backend = TestModelBackend(Subscription)
# Not subscribed yet.
self.assertFalse(backend.is_subscribed("<EMAIL>"))
# Subscribe.
self.assertTrue(backend.subscribe("<EMAIL>"))
# Already subscribed.
self.assertFalse(backend.subscribe("<EMAIL>"))
subscription = Subscription.objects.get()
self.assertEqual(subscription.email, "<EMAIL>")
self.assertTrue(subscription.is_active)
# Unsubscribe
self.assertEqual(None, backend.unsubscribe("<EMAIL>"))
subscription = Subscription.objects.get()
self.assertFalse(subscription.is_active)
# Does not exist, silent failure
self.assertEqual(None, backend.unsubscribe("<EMAIL>"))
subscription.is_active = True
subscription.save()
form = backend.subscription_details_form("<EMAIL>", MockRequest())
self.assertEqual(["full_name"], list(form.fields.keys()))
# That is the current behavior.
form = backend.subscription_details_form("<EMAIL>", MockRequest())
self.assertEqual(None, form)
def test_42(self):
response = self.client.post(
"/newsletter/",
{
"email": "<EMAIL>",
"action": "subscribe",
},
)
self.assertRedirects(response, "/newsletter/")
self.assertEqual(len(mail.outbox), 1)
body = mail.outbox[0].body
subscribe_url = unquote(
[line for line in body.splitlines() if "testserver" in line][0]
)
self.assertEqual(Subscription.objects.count(), 0)
response = self.client.get(subscribe_url)
self.assertRedirects(response, "/newsletter/")
self.assertEqual(Subscription.objects.filter(is_active=True).count(), 1)
def test_ajax_subscription(self):
for email in ["", "<EMAIL>", "@"]:
response = self.client.post(
"/newsletter/ajax_subscribe/", {"subscription_email": email}
)
self.assertContains(response, "Invalid email")
response = self.client.post(
"/newsletter/ajax_subscribe/", {"subscription_email": "<EMAIL>"}
)
self.assertContains(response, "You should receive a confirmation email shortly")
self.assertEqual(len(mail.outbox), 1)
body = mail.outbox[0].body
subscribe_url = unquote(
[line for line in body.splitlines() if "testserver" in line][0]
)
self.assertEqual(Subscription.objects.count(), 0)
response = self.client.get(subscribe_url)
self.assertEqual(Subscription.objects.filter(is_active=True).count(), 1)
response = self.client.post(
"/newsletter/ajax_subscribe/", {"subscription_email": "<EMAIL>"}
)
self.assertContains(response, "already subscribed")
|
1659397
|
import sys
import collections
import math
import itertools as it
def readArray(type=int):
    line = input()
    return [type(x) for x in line.split()]
def getFreq(arr):
    freq = collections.defaultdict(int)
    for x in arr:
        freq[x] += 1
    return freq
def getNearestLargerMultiple(n, m):
    return math.ceil(n / m)
def getNearestSmallerMultiple(n, m):
    return n // m
def solve():
    n, m, a, b = readArray()
    cost = 0
    ex_cost = n * b
    if n <= m:
        cost = (m - n) * a
    else:
        costa = (n - m) * b
        costb = (m * getNearestLargerMultiple(n, m) - n) * a
        costc = abs(m * getNearestSmallerMultiple(n, m) - n) * b
        cost = min(costa, costb, costc)
    print(min(cost, ex_cost))
if __name__ == '__main__':
    # sys.stdin = open('input.txt')
    solve()
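# Tracing the cost computation above with illustrative values n=5, m=3, a=1, b=2:
#   ex_cost                = 5 * 2           = 10
#   costa (down to m)      = (5 - 3) * 2     = 4
#   costb (up to 2*m = 6)  = (3*2 - 5) * 1   = 1
#   costc (down to 1*m = 3) = |3*1 - 5| * 2  = 4
# so the program prints min(min(4, 1, 4), 10) = 1.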
|
1659411
|
import pytest
import time
from tests.utils import FRONTEND_ENDPOINT
from tests.utils import GMS_ENDPOINT
from tests.utils import ingest_file_via_rest
from tests.utils import delete_urns_from_file
@pytest.fixture(scope="module", autouse=False)
def ingest_cleanup_data(request):
print("ingesting domains test data")
ingest_file_via_rest("tests/domains/data.json")
yield
print("removing domains test data")
delete_urns_from_file("tests/domains/data.json")
@pytest.mark.dependency()
def test_healthchecks(wait_for_healthchecks):
# Call to wait_for_healthchecks fixture will do the actual functionality.
pass
@pytest.mark.dependency(depends=["test_healthchecks"])
def test_create_list_get_domain(frontend_session):
    # Get count of existing domains
list_domains_json = {
"query": """query listDomains($input: ListDomainsInput!) {\n
listDomains(input: $input) {\n
start\n
count\n
total\n
domains {\n
urn\n
properties {\n
name\n
}\n
}\n
}\n
}""",
"variables": {
"input": {
"start": "0",
"count": "20"
}
}
}
response = frontend_session.post(
f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=list_domains_json
)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["listDomains"]["total"] is not None
assert "errors" not in res_data
before_count = res_data["data"]["listDomains"]["total"]
print(before_count)
domain_id = "test id"
domain_name = "<NAME>"
domain_description = "test description"
# Create new Domain
create_domain_json = {
"query": """mutation createDomain($input: CreateDomainInput!) {\n
createDomain(input: $input)
}""",
"variables": {
"input": {
"id": domain_id,
"name": domain_name,
"description": domain_description
}
}
}
response = frontend_session.post(
f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=create_domain_json
)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["createDomain"] is not None
assert "errors" not in res_data
domain_urn = res_data["data"]["createDomain"]
# Sleep for eventual consistency (not ideal)
time.sleep(2)
# Get new count of Domains
response = frontend_session.post(
f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=list_domains_json
)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["listDomains"]["total"] is not None
assert "errors" not in res_data
# Assert that there are more domains now.
after_count = res_data["data"]["listDomains"]["total"]
print(after_count)
assert after_count == before_count + 1
# Get the domain value back
get_domain_json = {
"query": """query domain($urn: String!) {\n
domain(urn: $urn) {\n
urn\n
id\n
properties {\n
name\n
description\n
}\n
}\n
}""",
"variables": {
"urn": domain_urn
}
}
response = frontend_session.post(
f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=get_domain_json
)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["domain"] is not None
assert "errors" not in res_data
domain = res_data["data"]["domain"]
assert domain["urn"] == f'urn:li:domain:{domain_id}'
assert domain["id"] == domain_id
assert domain["properties"]["name"] == domain_name
assert domain["properties"]["description"] == domain_description
delete_json = {
"urn": domain_urn
}
# Cleanup: Delete the domain
response = frontend_session.post(
f"{GMS_ENDPOINT}/entities?action=delete", json=delete_json
)
response.raise_for_status()
@pytest.mark.dependency(depends=["test_healthchecks", "test_create_list_get_domain"])
def test_set_unset_domain(frontend_session, ingest_cleanup_data):
    # Set and unset a Domain for a dataset. Note that this doesn't test adding domains to charts, dashboards, & jobs.
dataset_urn = "urn:li:dataset:(urn:li:dataPlatform:kafka,test-tags-terms-sample-kafka,PROD)"
domain_urn = "urn:li:domain:engineering"
# First unset to be sure.
unset_domain_json = {
"query": """mutation unsetDomain($entityUrn: String!) {\n
unsetDomain(entityUrn: $entityUrn)}""",
"variables": {
"entityUrn": dataset_urn
}
}
response = frontend_session.post(
f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=unset_domain_json
)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["unsetDomain"] is True
assert "errors" not in res_data
# Set a new domain
set_domain_json = {
"query": """mutation setDomain($entityUrn: String!, $domainUrn: String!) {\n
setDomain(entityUrn: $entityUrn, domainUrn: $domainUrn)}""",
"variables": {
"entityUrn": dataset_urn,
"domainUrn": domain_urn
}
}
response = frontend_session.post(
f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=set_domain_json
)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["setDomain"] is True
assert "errors" not in res_data
    # Now, fetch the dataset's domain and confirm it was set.
get_dataset_json = {
"query": """query dataset($urn: String!) {\n
dataset(urn: $urn) {\n
urn\n
domain {\n
urn\n
properties{\n
name\n
}\n
}\n
}\n
}""",
"variables": {
"urn": dataset_urn
}
}
response = frontend_session.post(
f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=get_dataset_json
)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]["dataset"]["domain"]["urn"] == domain_urn
assert res_data["data"]["dataset"]["domain"]["properties"]["name"] == "Engineering"
|
1659419
|
from insights.parsers.current_clocksource import CurrentClockSource
from insights.tests import context_wrap
CLKSRC = """
tsc
"""
def test_get_current_clksr():
clksrc = CurrentClockSource(context_wrap(CLKSRC))
assert clksrc.data == "tsc"
assert clksrc.is_kvm is False
assert clksrc.is_vmi_timer != clksrc.is_tsc
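# Note: CurrentClockSource parses the current kernel clocksource (typically
# /sys/devices/system/clocksource/clocksource0/current_clocksource); the is_kvm,
# is_tsc and is_vmi_timer helpers compare the parsed value against the
# corresponding clocksource names ("kvm-clock", "tsc", "vmi-timer").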
|
1659428
|
import math
import torch
from torch import nn
import torch.distributed as dist
import deepspeed
from deepspeed.utils import log_dist
from fairseq import tasks, distributed_utils
from fairseq.logging import metrics
from examples.training.deepspeed.ds_fairseq_data import BatchIterator
from examples.training.deepspeed.ds_fairseq_argument import gen_ds_fairseq_arg
best_bleu = 0.0
def torch_reduce_sum(
device,
logging_outputs,
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. fast_stat_sync_sum is
faster than all_gather_list_sync, but is only suitable when
logging outputs are scalars and can be summed. Note that
*logging_outputs* cannot contain any nested dicts/lists.
"""
data = {}
for i, stat in enumerate(extra_stats_to_sum):
data["extra_stats_" + str(i)] = stat
if len(logging_outputs) > 0:
log_keys = list(logging_outputs[0].keys())
for k in log_keys:
if not ignore:
v = sum(log[k] for log in logging_outputs if k in log)
else:
v = logging_outputs[0][k]
v = torch.zeros_like(v) if torch.is_tensor(v) else 0
data["logging_outputs_" + k] = v
else:
log_keys = None
data = distributed_utils.all_reduce_dict(data, device=device, group=None)
extra_stats_to_sum = [
data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum))
]
if log_keys is not None:
logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}]
else:
logging_outputs = []
return logging_outputs, extra_stats_to_sum
def view_log(log_dict):
if "bleu" in log_dict:
global best_bleu
best_bleu = max(best_bleu, log_dict["bleu"])
log_dict["best_bleu"] = best_bleu
tmp = [f"({k}, {v})" for k, v in log_dict.items()]
return " ".join(tmp)
class DsFairseqModel(nn.Module):
def __init__(self, model, criterion):
super(DsFairseqModel, self).__init__()
self.model = model
self.criterion = criterion
def forward(self, sample):
loss, sample_size, logging_output = self.criterion(self.model, sample)
return loss, sample_size, logging_output
class DsFairseqTrainer(object):
def __init__(self, fs_args, ds_config, task):
self.fs_args = fs_args
self.ds_config = ds_config
self.task = task
model = task.build_model(fs_args)
self.criterion = task.build_criterion(fs_args)
model = DsFairseqModel(model, self.criterion)
self.prepare_model_optimizer(model)
def prepare_model_optimizer(self, model):
# Initialize torch distributed
deepspeed.init_distributed(dist_backend="nccl")
# FIXME
from dataclasses import dataclass
@dataclass
class TmpClass:
local_rank: int
fake_arg = TmpClass(self.fs_args.device_id)
# DeepSpeed initializer handles FP16, distributed, optimizer automatically.
self.model, self.optimizer, _, _ = deepspeed.initialize(
args=fake_arg,
model=model,
model_parameters=model.parameters(),
config_params=self.ds_config,
)
def reduce_log(self, logging_outputs, sample_size):
with metrics.aggregate() as agg:
if logging_outputs is not None:
self.task.reduce_metrics(logging_outputs, self.criterion)
del logging_outputs
logging_output = agg.get_smoothed_values()
logging_output["sample_size"] = sample_size
return logging_output
@metrics.aggregate("train_inner")
def train_step(self, sample, is_dummy_batch):
self.model.train()
self.model.zero_grad()
loss, sample_size, logging_output = self.model(sample)
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
loss *= 0.0
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
logging_outputs, (sample_size,) = torch_reduce_sum(
self.model.device, [logging_output], sample_size, ignore=is_dummy_batch
)
final_loss = loss * (dist.get_world_size() / sample_size)
self.model.backward(final_loss)
self.model.step()
logging_output = self.reduce_log(logging_outputs, sample_size)
if self.model.global_steps % self.model.steps_per_print() != 0:
return
log_dist(
f'Step: {self.model.global_steps}, \
{view_log(metrics.get_smoothed_values("train_inner"))}',
[0],
)
metrics.reset_meters("train_inner")
def valid_step(self, batch_itr):
if self.model.global_steps % self.fs_args.validate_interval_updates != 0:
return
with torch.no_grad():
self.model.eval()
for subset in batch_itr.valid_dataset():
with metrics.aggregate(new_root=True) as agg:
for batch, is_dummy_batch in batch_itr.valid_batch():
_, sample_size, logging_output = self.task.valid_step(
batch, self.model.module.model, self.model.module.criterion
)
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
logging_outputs, (sample_size,) = torch_reduce_sum(
self.model.device,
logging_outputs,
sample_size,
ignore=is_dummy_batch,
)
logging_output = self.reduce_log(logging_outputs, sample_size)
log_dist(
"Valid on step: {}, dataset: {}. {}".format(
self.model.global_steps,
subset,
view_log(agg.get_smoothed_values()),
),
ranks=[0],
)
@metrics.aggregate("train")
def train(batch_itr, trainer):
for batch, is_dummy_batch in batch_itr.train_batch():
trainer.train_step(batch, is_dummy_batch)
trainer.valid_step(batch_itr)
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def tmp():
fs_args, ds_config = gen_ds_fairseq_arg()
set_seed(fs_args.seed)
task = tasks.setup_task(fs_args)
trainer = DsFairseqTrainer(fs_args, ds_config, task)
batch_itr = BatchIterator(fs_args, task)
for epoch in batch_itr.train_epoch():
train(batch_itr, trainer)
log_dist(
f'Finish epoch {epoch}, \
{view_log(metrics.get_smoothed_values("train"))}',
[0],
)
metrics.reset_meters("train")
if __name__ == "__main__":
tmp()
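# A typical launch goes through the DeepSpeed launcher; the exact flags depend on
# gen_ds_fairseq_arg(), which is not shown here, so the command below is only
# illustrative (script name and config path are placeholders):
#   deepspeed ds_fairseq_train.py <fairseq task/model args> --deepspeed_config ds_config.json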
|