Dataset schema (one column per field; "nullable" marks fields that may be null / ⌀):

| Field | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 – 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 245 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64, nullable | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 – 245 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64, nullable | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 – 245 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64, nullable | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 2 – 1.02M |
| avg_line_length | float64 | 1 – 417k |
| max_line_length | int64 | 1 – 987k |
| alphanum_fraction | float64 | 0 – 1 |
| content_no_comment | string | length 0 – 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
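If a dump with this schema is materialized as a Hugging Face dataset, rows can be iterated directly; a minimal sketch, where the dataset path is a placeholder and not something named in this document:

```python
# Hypothetical loader for a dump with the schema above.
# "user/python-code-dump" is a placeholder path, not a real dataset name.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train", streaming=True)

for row in ds.take(3):
    # Each row carries the file body plus per-repo star/issue/fork metadata.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:120])
```

Streaming avoids downloading the whole dump just to inspect a few rows.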
hexsha: f70543a92105e9ffaab879256ddfee6d1ffc3133 · size: 789 · ext: py · lang: Python
repo (identical across stars/issues/forks columns): criticallycode/zima @ cd38cac1c0c33b362d110ae28deba3828daa3f4a · path: app/plugins/task/command.py · licenses: ["Apache-2.0"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
from systems.plugins.index import BaseProvider
import re
import shlex


class Provider(BaseProvider('task', 'command')):

    def execute(self, results, params):
        env = self._env_vars(params)
        stdin = params.pop('input', self.field_input)
        cwd = params.pop('cwd', self.field_cwd)
        display = params.pop('display', self.field_display)

        # Interpolate merged options into the configured command string.
        options = self._merge_options(self.field_options, params, self.field_lock)
        command = self._interpolate(self.field_command, options)

        # Optionally prefix with sudo before tokenizing for execution.
        if self.field_sudo:
            command = 'sudo ' + command[0]
        else:
            command = command[0]

        self.command.sh(shlex.split(command),
            input = stdin,
            display = display,
            env = env,
            cwd = cwd
        )
avg_line_length: 28.178571 · max_line_length: 82 · alphanum_fraction: 0.603295
content_no_comment: identical to the content above (omitted; the file has no comments to strip)
is_comment_constant_removed: true · is_sharp_comment_removed: true
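The provider above splits the interpolated command with shlex.split before handing it to the command runner, and only prefixes "sudo " when field_sudo is set. A standalone sketch of that tokenization step (standard library only; the command string is an invented example, not from the plugin):

```python
import shlex

# Invented example command; quoted arguments survive as single tokens.
command = "sudo systemctl restart 'my service'"
print(shlex.split(command))
# ['sudo', 'systemctl', 'restart', 'my service']
```

Because the split happens after interpolation, quoting inside the configured command string behaves the way it would in a POSIX shell.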
hexsha: f70543b92bf5c3f32227c1e22d912116c437eda4 · size: 10,459 · ext: py · lang: Python
path: kolibri/utils/tests/test_cli.py · licenses: ["MIT"]
max_stars repo: FollonSaxBass/kolibri @ 4cf820b14386aecc228fecff64c847bad407cbb1 · stars: 2 (2021-05-13T10:20:46.000Z to 2021-11-15T12:31:03.000Z)
max_issues repo: camellia26/kolibri @ 7f1cb794c93f37e039be22f56a5ac1989ed22bde · issues: 2 (2021-09-24T11:36:21.000Z to 2021-09-29T16:09:25.000Z)
max_forks repo: camellia26/kolibri @ 7f1cb794c93f37e039be22f56a5ac1989ed22bde · forks count and event datetimes: null
content:
"""
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import tempfile
import pytest
from django.db.utils import OperationalError
from mock import patch
import kolibri
from kolibri.plugins.utils import autoremove_unavailable_plugins
from kolibri.utils import cli
from kolibri.utils import options
logger = logging.getLogger(__name__)
LOG_LOGGER = []
def log_logger(logger_instance, LEVEL, msg, args, **kwargs):
"""
Monkeypatching for logging.Logger._log to scoop up log messages if we wanna
test something specific was logged.
"""
LOG_LOGGER.append((LEVEL, msg))
# Call the original function
logger_instance.__log(LEVEL, msg, args, **kwargs)
def activate_log_logger(monkeypatch):
"""
Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern
of py.test (test accepts a ``monkeypatch`` argument)
"""
monkeypatch.setattr(logging.Logger, "__log", logging.Logger._log, raising=False)
monkeypatch.setattr(logging.Logger, "_log", log_logger)
@pytest.fixture
def plugins():
from kolibri import plugins
_, config_file = tempfile.mkstemp(suffix="json")
old_config_file = plugins.conf_file
plugins.conf_file = config_file
plugins.config.set_defaults()
yield plugins
plugins.conf_file = old_config_file
def test_bogus_plugin_autoremove(plugins):
"""
Checks that a plugin is auto-removed when it cannot be imported
"""
plugin_name = "giraffe.horse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
autoremove_unavailable_plugins()
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_autoremove_no_path(plugins):
"""
Checks that a plugin without a dotted path is also auto-removed
"""
plugin_name = "giraffehorse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
autoremove_unavailable_plugins()
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_disable(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
disabled_apps_before = plugins.config["DISABLED_PLUGINS"].copy()
try:
cli.disable.callback(("i_do_not_exist",), False)
except Exception:
pass
assert installed_apps_before == plugins.config["INSTALLED_PLUGINS"]
assert disabled_apps_before == plugins.config["DISABLED_PLUGINS"]
def test_plugin_cannot_be_imported_disable(plugins):
"""
A plugin may be in plugins.config['INSTALLED_PLUGINS'] but broken or uninstalled
"""
plugin_name = "giraffe.horse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
try:
cli.disable.callback((plugin_name,), False)
except Exception:
pass
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
# We also don't want to endlessly add cruft to the disabled apps
assert plugin_name not in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
test_plugin = "kolibri.plugins.media_player"
assert test_plugin in installed_apps_before
# Because RIP example plugin
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable_twice(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
test_plugin = "kolibri.plugins.media_player"
assert test_plugin in installed_apps_before
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config.ACTIVE_PLUGINS
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config.ACTIVE_PLUGINS
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_plugin_with_no_plugin_class(plugins):
"""
Expected behavior is that nothing blows up with exceptions, user just gets
a warning and nothing is enabled or changed in the configuration.
"""
# For fun, we pass in a system library
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
try:
cli.enable.callback(("os.path",), False)
except Exception:
pass
assert installed_apps_before == plugins.config["INSTALLED_PLUGINS"]
@pytest.mark.django_db
def test_kolibri_listen_port_env(monkeypatch):
"""
Starts and stops the server, mocking the actual server.start()
Checks that the correct fallback port is used from the environment.
"""
with patch("django.core.management.call_command"), patch(
"kolibri.utils.server.start"
) as start:
from kolibri.utils import server
def start_mock(port, *args, **kwargs):
assert port == test_port
try:
os.remove(server.STARTUP_LOCK)
except OSError:
pass
activate_log_logger(monkeypatch)
start.side_effect = start_mock
test_port = 1234
os.environ["KOLIBRI_HTTP_PORT"] = str(test_port)
# force a reload of plugins.OPTIONS so the environment variable will be read in
from kolibri.utils import conf
conf.OPTIONS.update(options.read_options_file(conf.KOLIBRI_HOME))
cli.start.callback(test_port, False)
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
assert excinfo.code == 0
# Stop the server AGAIN, asserting that we can call the stop command
# on an already stopped server and will be gracefully informed about
# it.
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
assert excinfo.code == 0
assert "Already stopped" in LOG_LOGGER[-1][1]
def status_starting_up():
raise server.NotRunning(server.STATUS_STARTING_UP)
# Ensure that if a server is reported to be 'starting up', it doesn't
# get killed while doing that.
monkeypatch.setattr(server, "get_status", status_starting_up)
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
assert excinfo.code == server.STATUS_STARTING_UP
assert "Not stopped" in LOG_LOGGER[-1][1]
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="")
@patch("kolibri.utils.cli.update")
@patch("kolibri.utils.cli.plugin.callback")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_first_run(dbbackup, plugin, update, get_version):
"""
Tests that the first_run() function performs as expected
"""
cli.initialize()
update.assert_called_once()
dbbackup.assert_not_called()
# Check that it got called for each default plugin
from kolibri import plugins
assert set(plugins.config["INSTALLED_PLUGINS"]) == set(plugins.DEFAULT_PLUGINS)
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
@patch("kolibri.utils.cli.update")
def test_update(update, get_version):
"""
Tests that update() function performs as expected
"""
cli.initialize()
update.assert_called_once()
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
def test_update_exits_if_running(get_version):
"""
Tests that update() function performs as expected
"""
with patch("kolibri.utils.cli.server.get_status"):
try:
cli.initialize()
pytest.fail("Update did not exit when Kolibri was already running")
except SystemExit:
pass
@pytest.mark.django_db
def test_version_updated():
"""
Tests our db backup logic: version_updated gets any change, backup gets only non-dev changes
"""
assert cli.version_updated("0.10.0", "0.10.1")
assert not cli.version_updated("0.10.0", "0.10.0")
assert not cli.should_back_up("0.10.0-dev0", "")
assert not cli.should_back_up("0.10.0-dev0", "0.10.0")
assert not cli.should_back_up("0.10.0", "0.10.0-dev0")
assert not cli.should_back_up("0.10.0-dev0", "0.10.0-dev0")
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value=kolibri.__version__)
@patch("kolibri.utils.cli.update")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_update_no_version_change(dbbackup, update, get_version):
"""
Tests that when the version doesn't change, we are not doing things we
shouldn't
"""
cli.initialize()
update.assert_not_called()
dbbackup.assert_not_called()
def test_cli_usage():
# Test the -h
with pytest.raises(SystemExit) as excinfo:
cli.main("-h")
assert excinfo.code == 0
with pytest.raises(SystemExit) as excinfo:
cli.main("--version")
assert excinfo.code == 0
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins(echo_mock, plugins):
cli.list.callback()
test_plugin = "kolibri.plugins.media_player"
any(
map(
lambda x: test_plugin in x[0] and "ENABLED" in x[0],
echo_mock.call_args_list,
)
)
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins_disabled(echo_mock, plugins):
cli.list.callback()
test_plugin = "kolibri.plugins.media_player"
cli.disable.callback((test_plugin,), False)
any(
map(
lambda x: test_plugin in x[0] and "DISABLED" in x[0],
echo_mock.call_args_list,
)
)
@patch("kolibri.utils.cli._migrate_databases")
@patch("kolibri.utils.cli.version_updated")
def test_migrate_if_unmigrated(version_updated, _migrate_databases):
# No matter what, ensure that version_updated returns False
version_updated.return_value = False
from morango.models import InstanceIDModel
with patch.object(
InstanceIDModel, "get_or_create_current_instance"
) as get_or_create_current_instance:
get_or_create_current_instance.side_effect = OperationalError("Test")
cli.initialize()
_migrate_databases.assert_called_once()
avg_line_length: 32.582555 · max_line_length: 96 · alphanum_fraction: 0.707716
content_no_comment: same file as the content above with comments and docstrings stripped (omitted; duplicates the content)
is_comment_constant_removed: true · is_sharp_comment_removed: true
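activate_log_logger in the test file above swaps logging.Logger._log for a recorder so tests can assert on emitted messages. A reduced sketch of the same monkeypatch pattern outside Kolibri (names here are illustrative, not from the test file):

```python
import logging

captured = []

def recording_log(self, level, msg, args, **kwargs):
    # Record the call, then delegate to the saved original implementation.
    captured.append((level, msg))
    self.__original_log(level, msg, args, **kwargs)

def install(monkeypatch):
    # Keep a reference to the real _log on the class, then replace it.
    monkeypatch.setattr(logging.Logger, "__original_log", logging.Logger._log, raising=False)
    monkeypatch.setattr(logging.Logger, "_log", recording_log)

def test_warning_is_captured(monkeypatch):
    install(monkeypatch)
    logging.getLogger("demo").warning("disk almost full")
    assert any("disk almost full" in msg for _, msg in captured)
```

Using the py.test monkeypatch fixture means the original _log is restored automatically after each test, which is why the Kolibri tests take the same route rather than patching the logger permanently.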
hexsha: f70544b0ddc04df283be747eb408cbc27a54108d · size: 284 · ext: py · lang: Python
repo (identical across stars/issues/forks columns): johnnynode/AI-LEARNING-MATERIAL @ 1719f5b6ecb9b9caf485b9d806c1211b142b8ed5 · path: Basic-Python/code/test_magic/3.py · licenses: ["MIT"]
max_stars_count: 2 (2018-06-08T00:40:17.000Z to 2018-06-08T05:27:30.000Z) · max_issues_count: null · max_forks_count: null
content:
class Person:
    name = 'zhangsan'
    age = 20

p = Person()
print(p)  # <__main__.Person object at 0x10073e668>
print('⭐️ ' * 20)

class Stu:
    name = 'zhangsan'
    age = 20

    def __str__(self):
        return "name: %s; age: %d" % (self.name, self.age)

s = Stu()
print(s)  # name: zhangsan; age: 20
avg_line_length: 15.777778 · max_line_length: 52 · alphanum_fraction: 0.623239
content_no_comment: same file as the content above with the inline comments stripped (omitted; duplicates the content)
is_comment_constant_removed: true · is_sharp_comment_removed: true
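The snippet above shows that print() falls back to the default object representation until __str__ is defined. A short companion sketch (not part of the dataset row) contrasting __str__ with __repr__:

```python
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __str__(self):
        # Used by print() and str(): reader-friendly form.
        return f"({self.x}, {self.y})"

    def __repr__(self):
        # Used by the interactive prompt and containers: unambiguous form.
        return f"Point(x={self.x}, y={self.y})"

p = Point(1, 2)
print(p)    # (1, 2)
print([p])  # [Point(x=1, y=2)] -- formatting list elements calls __repr__
```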
hexsha: f70544f6140f506ebae794b51d635f46a4f446d2 · size: 3,088 · ext: py · lang: Python
repo (identical across stars/issues/forks columns): DwaraknathT/sparsity @ 705f2cba074e6ab4f7655c6af98882773cd826bf · path: src/layers/transformers/sublayers.py · licenses: ["MIT"]
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
content:
""" Define the sublayers in encoder/decoder layer """
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
def forward(self, q, k, v, mask=None):
# Scale based on the current shape
attn = torch.matmul(q / (q.shape[-1] ** 0.5), k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1e9)
attn = F.softmax(attn, dim=-1)
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
# Pass through the pre-attention projection: b x lq x (n*dv)
# Separate different heads: b x lq x n x dv
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
# Transpose for attention dot product: b x n x lq x dv
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1) # For head axis broadcasting.
q, attn = self.attention(q, k, v, mask=mask)
# Transpose to move the head dimension back: b x lq x n x dv
# Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid) # position-wise
self.w_2 = nn.Linear(d_hid, d_in) # position-wise
self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
avg_line_length: 31.191919 · max_line_length: 96 · alphanum_fraction: 0.599093
content_no_comment: same file as the content above with comments and docstrings stripped (omitted; duplicates the content)
is_comment_constant_removed: true · is_sharp_comment_removed: true
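The attention implemented above is the usual scaled dot product, softmax(QK^T / sqrt(d_k)) V, followed by the per-head projections and the residual plus layer norm in MultiHeadAttention. A minimal shape check, assuming the classes from the file above are in scope (the sizes chosen here are just an example):

```python
import torch

# Assumes MultiHeadAttention from the file above is importable / in scope.
mha = MultiHeadAttention(n_head=8, d_model=512, d_k=64, d_v=64, dropout=0.1)

q = k = v = torch.rand(2, 10, 512)              # (batch, seq_len, d_model)
mask = torch.ones(2, 1, 10, dtype=torch.bool)   # broadcasts over heads and query positions

out, attn = mha(q, k, v, mask=mask)
print(out.shape)   # torch.Size([2, 10, 512])   residual + layer norm keep d_model
print(attn.shape)  # torch.Size([2, 8, 10, 10]) one attention map per head
```

The output keeps the input's (batch, seq_len, d_model) shape because the heads are concatenated and projected back through fc before the residual connection.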
hexsha: f705463c73b3b08df3a5efe1d09bfeb5a1f30dce · size: 33,830 · ext: py · lang: Python
repo (identical across stars/issues/forks columns): pzread/judge @ 9721caff6bda5c9d35edb581c060606824fad6d8 · path: StdChal.py · licenses: ["MIT"]
max_stars_count: 25 (2015-03-14T08:13:22.000Z to 2020-07-30T15:34:34.000Z)
max_issues_count: 32 (2016-02-16T08:38:52.000Z to 2016-08-18T08:12:15.000Z)
max_forks_count: 8 (2015-10-14T10:27:21.000Z to 2020-08-01T17:11:20.000Z)
content:
'''Standard challenge module.'''
import os
import shutil
import fcntl
from cffi import FFI
from tornado import gen, concurrent, process
from tornado.stack_context import StackContext
from tornado.ioloop import IOLoop
import PyExt
import Privilege
import Config
from Utils import FileUtils
STATUS_NONE = 0
STATUS_AC = 1
STATUS_WA = 2
STATUS_RE = 3
STATUS_TLE = 4
STATUS_MLE = 5
STATUS_CE = 6
STATUS_ERR = 7
MS_BIND = 4096
class StdChal:
'''Standard challenge.
Static attributes:
last_uniqid (int): Last ID.
last_standard_uid (int): Last UID for standard tasks.
last_restrict_uid (int): Last UID for restricted tasks.
null_fd (int): File descriptor of /dev/null.
build_cache (dict): Cache information of builds.
build_cache_refcount (dict): Refcount of build caches.
Attributes:
uniqid (int): Unique ID.
code_path (string): Code path.
res_path (string): Resource path.
comp_typ (string): Type of compile.
judge_typ (string): Type of judge.
test_list ([dict]): Test parameter lists.
metadata (dict): Metadata for judge.
chal_id (int): Challenge ID.
chal_path (string): Challenge path.
'''
last_uniqid = 0
last_standard_uid = Config.CONTAINER_STANDARD_UID_BASE
last_restrict_uid = Config.CONTAINER_RESTRICT_UID_BASE
null_fd = None
@staticmethod
def init():
'''Initialize the module.'''
with StackContext(Privilege.fileaccess):
try:
shutil.rmtree('container/standard/home')
except FileNotFoundError:
pass
os.mkdir('container/standard/home', mode=0o771)
try:
shutil.rmtree('container/standard/cache')
except FileNotFoundError:
pass
os.mkdir('container/standard/cache', mode=0o771)
ffi = FFI()
ffi.cdef('''int mount(const char source[], const char target[],
const char filesystemtype[], unsigned long mountflags,
const void *data);''')
ffi.cdef('''int umount(const char *target);''')
libc = ffi.dlopen('libc.so.6')
with StackContext(Privilege.fullaccess):
libc.umount(b'container/standard/dev')
libc.mount(b'/dev', b'container/standard/dev', b'', MS_BIND, \
ffi.NULL)
StdChal.null_fd = os.open('/dev/null', os.O_RDWR | os.O_CLOEXEC)
StdChal.build_cache = {}
StdChal.build_cache_refcount = {}
@staticmethod
def get_standard_ugid():
'''Generate standard UID/GID.
Returns:
(int, int): Standard UID/GID
'''
StdChal.last_standard_uid += 1
return (StdChal.last_standard_uid, StdChal.last_standard_uid)
@staticmethod
def get_restrict_ugid():
'''Generate restrict UID/GID.
Returns:
(int, int): Restrict UID/GID
'''
StdChal.last_restrict_uid += 1
return (StdChal.last_restrict_uid, StdChal.last_restrict_uid)
@staticmethod
def build_cache_find(res_path):
'''Get build cache.
Args:
res_path (string): Resource path.
Returns:
(string, int): (cache hash, GID) or None if not found.
'''
try:
return StdChal.build_cache[res_path]
except KeyError:
return None
@staticmethod
def build_cache_update(res_path, cache_hash, gid):
'''Update build cache.
Args:
res_path (string): Resource path.
cache_hash (int): Cache hash.
gid (int): GID.
Returns:
None
'''
ret = StdChal.build_cache_find(res_path)
if ret is not None:
StdChal.build_cache_decref(ret[0])
del StdChal.build_cache[res_path]
StdChal.build_cache[res_path] = (cache_hash, gid)
StdChal.build_cache_refcount[cache_hash] = 1
@staticmethod
def build_cache_incref(cache_hash):
'''Increment the refcount of the build cache.
Args:
cache_hash (int): Cache hash.
Returns:
None
'''
StdChal.build_cache_refcount[cache_hash] += 1
@staticmethod
def build_cache_decref(cache_hash):
'''Decrement the refcount of the build cache.
Delete the build cache if the refcount = 0.
Args:
cache_hash (int): Cache hash.
Returns:
None
'''
StdChal.build_cache_refcount[cache_hash] -= 1
if StdChal.build_cache_refcount[cache_hash] == 0:
with StackContext(Privilege.fileaccess):
shutil.rmtree('container/standard/cache/%x'%cache_hash)
def __init__(self, chal_id, code_path, comp_typ, judge_typ, res_path, \
test_list, metadata):
'''Initialize.
Args:
chal_id (int): Challenge ID.
code_path (string): Code path.
comp_typ (string): Type of compile.
judge_typ (string): Type of judge.
res_path (string): Resource path.
test_list ([dict]): Test parameter lists.
metadata (dict): Metadata for judge.
'''
StdChal.last_uniqid += 1
self.uniqid = StdChal.last_uniqid
self.code_path = code_path
self.res_path = res_path
self.comp_typ = comp_typ
self.judge_typ = judge_typ
self.test_list = test_list
self.metadata = metadata
self.chal_id = chal_id
self.chal_path = None
StdChal.last_standard_uid += 1
self.compile_uid, self.compile_gid = StdChal.get_standard_ugid()
@gen.coroutine
def prefetch(self):
'''Prefetch files.'''
path_set = set([self.code_path])
for root, _, files in os.walk(self.res_path):
for filename in files:
path_set.add(os.path.abspath(os.path.join(root, filename)))
path_list = list(path_set)
proc_list = []
with StackContext(Privilege.fileaccess):
for idx in range(0, len(path_list), 16):
proc_list.append(process.Subprocess(
['./Prefetch.py'] + path_list[idx:idx + 16],
stdout=process.Subprocess.STREAM))
for proc in proc_list:
yield proc.stdout.read_bytes(2)
@gen.coroutine
def start(self):
'''Start the challenge.
Returns:
dict: Challenge result.
'''
cache_hash = None
cache_gid = None
# Check if special judge needs to rebuild.
if self.judge_typ in ['ioredir']:
hashproc = process.Subprocess( \
['./HashDir.py', self.res_path + '/check'], \
stdout=process.Subprocess.STREAM)
dirhash = yield hashproc.stdout.read_until(b'\n')
dirhash = int(dirhash.decode('utf-8').rstrip('\n'), 16)
ret = StdChal.build_cache_find(self.res_path)
if ret is not None and ret[0] == dirhash:
cache_hash, cache_gid = ret
judge_ioredir = IORedirJudge('container/standard', \
'/cache/%x'%cache_hash)
else:
cache_hash = dirhash
_, cache_gid = StdChal.get_standard_ugid()
build_ugid = StdChal.get_standard_ugid()
build_relpath = '/cache/%x'%cache_hash
build_path = 'container/standard' + build_relpath
judge_ioredir = IORedirJudge('container/standard', \
build_relpath)
if not (yield judge_ioredir.build(build_ugid, self.res_path)):
return [(0, 0, STATUS_ERR)] * len(self.test_list), ''
FileUtils.setperm(build_path, \
Privilege.JUDGE_UID, cache_gid, umask=0o750)
with StackContext(Privilege.fullaccess):
os.chmod(build_path, 0o750)
StdChal.build_cache_update(self.res_path, cache_hash, cache_gid)
print('StdChal %d built checker %x'%(self.chal_id, cache_hash))
StdChal.build_cache_incref(cache_hash)
print('StdChal %d started'%self.chal_id)
# Create challenge environment.
self.chal_path = 'container/standard/home/%d'%self.uniqid
with StackContext(Privilege.fileaccess):
os.mkdir(self.chal_path, mode=0o771)
try:
yield self.prefetch()
print('StdChal %d prefetched'%self.chal_id)
if self.comp_typ in ['g++', 'clang++']:
ret, verdict = yield self.comp_cxx()
elif self.comp_typ == 'makefile':
ret, verdict = yield self.comp_make()
elif self.comp_typ == 'python3':
ret, verdict = yield self.comp_python()
if ret != PyExt.DETECT_NONE:
return [(0, 0, STATUS_CE, verdict)] * len(self.test_list)
print('StdChal %d compiled'%self.chal_id)
# Prepare test arguments
if self.comp_typ == 'python3':
exefile_path = self.chal_path \
+ '/compile/__pycache__/test.cpython-34.pyc'
exe_path = '/usr/bin/python3.5'
argv = ['./a.out']
envp = ['HOME=/', 'LANG=en_US.UTF-8']
else:
exefile_path = self.chal_path + '/compile/a.out'
exe_path = './a.out'
argv = []
envp = []
# Prepare judge
test_future = []
if self.judge_typ == 'diff':
for test in self.test_list:
test_future.append(self.judge_diff(
exefile_path,
exe_path, argv, envp,
test['in'], test['ans'],
test['timelimit'], test['memlimit']))
elif self.judge_typ == 'ioredir':
for test in self.test_list:
check_uid, _ = StdChal.get_standard_ugid()
test_uid, test_gid = StdChal.get_restrict_ugid()
test_future.append(judge_ioredir.judge( \
exefile_path, exe_path, argv, envp, \
(check_uid, cache_gid), \
(test_uid, test_gid), \
'/home/%d/run_%d'%(self.uniqid, test_uid), \
test, self.metadata))
# Emit tests
test_result = yield gen.multi(test_future)
ret_result = list()
for result in test_result:
test_pass, data, verdict = result
runtime, peakmem, error = data
status = STATUS_ERR
if error == PyExt.DETECT_NONE:
if test_pass is True:
status = STATUS_AC
else:
status = STATUS_WA
elif error == PyExt.DETECT_OOM:
status = STATUS_MLE
elif error == PyExt.DETECT_TIMEOUT \
or error == PyExt.DETECT_FORCETIMEOUT:
status = STATUS_TLE
elif error == PyExt.DETECT_EXITERR:
status = STATUS_RE
else:
status = STATUS_ERR
ret_result.append((runtime, peakmem, status, verdict))
return ret_result
finally:
if cache_hash is not None:
StdChal.build_cache_decref(cache_hash)
with StackContext(Privilege.fileaccess):
shutil.rmtree(self.chal_path)
print('StdChal %d done'%self.chal_id)
@concurrent.return_future
def comp_cxx(self, callback=None):
'''GCC, Clang compile.
Args:
callback (function): Callback of return_future.
Returns:
None
'''
def _started_cb(task_id):
'''Started callback.
Close unused file descriptors after the task is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal errpipe_fd
os.close(errpipe_fd)
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal compile_path
with StackContext(Privilege.fileaccess):
verfile = open(compile_path + '/verdict.txt', 'rb')
# To fix decoding error.
# Force convert the binary string to string temporarily.
verdict = ''.join(chr(c) for c in verfile.read(140))
verfile.close()
callback((stat['detect_error'], verdict))
compile_path = self.chal_path + '/compile'
with StackContext(Privilege.fileaccess):
os.mkdir(compile_path, mode=0o770)
shutil.copyfile(self.code_path, compile_path + '/test.cpp', \
follow_symlinks=False)
FileUtils.setperm(compile_path, self.compile_uid, self.compile_gid)
with StackContext(Privilege.fileaccess):
errpipe_fd = os.open(compile_path + '/verdict.txt', \
os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC, mode=0o440)
if self.comp_typ == 'g++':
compiler = '/usr/bin/g++'
elif self.comp_typ == 'clang++':
compiler = '/usr/bin/clang++'
task_id = PyExt.create_task(compiler, \
[
'-O2',
'-std=c++14',
'-o', './a.out',
'./test.cpp',
], \
[
'PATH=/usr/bin:/bin',
'TMPDIR=/home/%d/compile'%self.uniqid,
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: errpipe_fd,
}, \
'/home/%d/compile'%self.uniqid, 'container/standard', \
self.compile_uid, self.compile_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
os.close(errpipe_fd)
callback((PyExt.DETECT_INTERNALERR, ''))
return
PyExt.start_task(task_id, _done_cb, _started_cb)
@concurrent.return_future
def comp_make(self, callback=None):
'''Makefile compile.
Args:
callback (function): Callback of return_future.
Returns:
None
'''
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
callback((stat['detect_error'], ''))
make_path = self.chal_path + '/compile'
FileUtils.copydir(self.res_path + '/make', make_path)
with StackContext(Privilege.fileaccess):
shutil.copyfile(self.code_path, make_path + '/main.cpp', \
follow_symlinks=False)
FileUtils.setperm(make_path, self.compile_uid, self.compile_gid)
with StackContext(Privilege.fullaccess):
os.chmod(make_path, mode=0o770)
task_id = PyExt.create_task('/usr/bin/make', \
[], \
[
'PATH=/usr/bin:/bin',
'TMPDIR=/home/%d/compile'%self.uniqid,
'OUT=./a.out',
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}, \
'/home/%d/compile'%self.uniqid, 'container/standard', \
self.compile_uid, self.compile_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
callback((PyExt.DETECT_INTERNALERR, ''))
else:
PyExt.start_task(task_id, _done_cb)
@concurrent.return_future
def comp_python(self, callback=None):
'''Python3.4 compile.
Args:
callback (function): Callback of return_future.
Returns:
None
'''
def _started_cb(task_id):
'''Started callback.
Close unused file descriptors after the task is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal errpipe_fd
os.close(errpipe_fd)
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal compile_path
with StackContext(Privilege.fileaccess):
verfile = open(compile_path + '/verdict.txt', 'rb')
# To fix decoding error.
# Force convert the binary string to string temporarily.
verdict = ''.join(chr(c) for c in verfile.read(140))
verfile.close()
callback((stat['detect_error'], verdict))
compile_path = self.chal_path + '/compile'
with StackContext(Privilege.fileaccess):
os.mkdir(compile_path, mode=0o770)
shutil.copyfile(self.code_path, compile_path + '/test.py', \
follow_symlinks=False)
FileUtils.setperm(compile_path, self.compile_uid, self.compile_gid)
with StackContext(Privilege.fileaccess):
errpipe_fd = os.open(compile_path + '/verdict.txt', \
os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC, mode=0o440)
task_id = PyExt.create_task('/usr/bin/python3.5', \
[
'-m',
'py_compile',
'./test.py'
], \
[
'HOME=/home/%d/compile'%self.uniqid,
'LANG=en_US.UTF-8'
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: errpipe_fd,
}, \
'/home/%d/compile'%self.uniqid, 'container/standard', \
self.compile_uid, self.compile_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
os.close(errpipe_fd)
callback((PyExt.DETECT_INTERNALERR, ''))
return
PyExt.start_task(task_id, _done_cb, _started_cb)
@concurrent.return_future
def judge_diff(self, src_path, exe_path, argv, envp, in_path, ans_path, \
timelimit, memlimit, callback=None):
'''Diff judge.
Args:
src_path (string): Executable source path.
exe_path (string): Executable or interpreter path in the sandbox.
argv ([string]): List of arguments.
envp ([string]): List of environment variables.
in_path (string): Input file path.
ans_path (string): Answer file path.
timelimit (int): Timelimit.
memlimit (int): Memlimit.
callback (function): Callback of return_future.
Returns:
None
'''
def _started_cb(task_id):
'''Started callback.
Close unused file descriptors after the task is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal infile_fd
nonlocal outpipe_fd
os.close(infile_fd)
os.close(outpipe_fd[1])
IOLoop.instance().add_handler(outpipe_fd[0], _diff_out, \
IOLoop.READ | IOLoop.ERROR)
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal result_stat
nonlocal result_pass
result_stat = (stat['utime'], stat['peakmem'], stat['detect_error'])
if result_pass is not None:
callback((result_pass, result_stat, ''))
def _diff_out(evfd, events):
'''Diff the output of the task.
Args:
evfd (int): Event file descriptor.
events (int): Event flags.
Returns:
None
'''
nonlocal outpipe_fd
nonlocal ansfile
nonlocal result_stat
nonlocal result_pass
end_flag = False
if events & IOLoop.READ:
while True:
try:
data = os.read(outpipe_fd[0], 65536)
except BlockingIOError:
break
ansdata = ansfile.read(len(data))
if data != ansdata:
result_pass = False
end_flag = True
break
if len(ansdata) == 0:
if len(ansfile.read(1)) == 0:
result_pass = True
else:
result_pass = False
end_flag = True
break
if (events & IOLoop.ERROR) or end_flag:
if result_pass is None:
if len(ansfile.read(1)) == 0:
result_pass = True
else:
result_pass = False
IOLoop.instance().remove_handler(evfd)
os.close(outpipe_fd[0])
ansfile.close()
if result_stat is not None:
callback((result_pass, result_stat, ''))
judge_uid, judge_gid = StdChal.get_restrict_ugid()
# Prepare I/O and stat.
with StackContext(Privilege.fileaccess):
infile_fd = os.open(in_path, os.O_RDONLY | os.O_CLOEXEC)
ansfile = open(ans_path, 'rb')
outpipe_fd = os.pipe2(os.O_CLOEXEC)
fcntl.fcntl(outpipe_fd[0], fcntl.F_SETFL, os.O_NONBLOCK)
result_stat = None
result_pass = None
# Prepare judge environment.
with StackContext(Privilege.fileaccess):
judge_path = self.chal_path + '/run_%d'%judge_uid
os.mkdir(judge_path, mode=0o771)
shutil.copyfile(src_path, judge_path + '/a.out', \
follow_symlinks=False)
with StackContext(Privilege.fullaccess):
os.chown(judge_path + '/a.out', judge_uid, judge_gid)
os.chmod(judge_path + '/a.out', 0o500)
task_id = PyExt.create_task(exe_path, argv, envp, \
{
0: infile_fd,
1: outpipe_fd[1],
2: outpipe_fd[1],
}, \
'/home/%d/run_%d'%(self.uniqid, judge_uid), 'container/standard', \
judge_uid, judge_gid, timelimit, memlimit, \
PyExt.RESTRICT_LEVEL_HIGH)
if task_id is None:
os.close(infile_fd)
os.close(outpipe_fd[0])
os.close(outpipe_fd[1])
ansfile.close()
callback((False, (0, 0, PyExt.DETECT_INTERNALERR), ''))
else:
PyExt.start_task(task_id, _done_cb, _started_cb)
class IORedirJudge:
'''I/O redirect spcial judge.
Attributes:
container_path (string): Container path.
build_relpath (string): Relative build path.
build_path (string): Build path.
'''
def __init__(self, container_path, build_relpath):
'''Initialize.
Args:
container_path (string): Container path.
build_relpath (string): Relative build path.
'''
self.container_path = container_path
self.build_relpath = build_relpath
self.build_path = container_path + build_relpath
@concurrent.return_future
def build(self, build_ugid, res_path, callback=None):
'''Build environment.
Args:
build_ugid ((int, int)): Build UID/GID.
res_path (string): Resource path.
callback (function): Callback of return_future.
Returns:
None
'''
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
if stat['detect_error'] == PyExt.DETECT_NONE:
callback(True)
else:
callback(False)
build_uid, build_gid = build_ugid
# Prepare build environment.
FileUtils.copydir(res_path + '/check', self.build_path)
FileUtils.setperm(self.build_path, build_uid, build_gid)
with StackContext(Privilege.fullaccess):
os.chmod(self.build_path, mode=0o770)
with StackContext(Privilege.fileaccess):
if not os.path.isfile(self.build_path + '/build'):
callback(True)
return
# Make the build file executable.
with StackContext(Privilege.fullaccess):
os.chmod(self.build_path + '/build', mode=0o770)
# Build.
task_id = PyExt.create_task(self.build_relpath + '/build', \
[], \
[
'PATH=/usr/bin:/bin',
'TMPDIR=%s'%self.build_relpath,
'HOME=%s'%self.build_relpath,
'LANG=en_US.UTF-8'
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}, \
self.build_relpath, 'container/standard', \
build_uid, build_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
callback(False)
else:
PyExt.start_task(task_id, _done_cb)
@concurrent.return_future
def judge(self, src_path, exe_relpath, argv, envp, check_ugid, test_ugid, \
test_relpath, test_param, metadata, callback=None):
'''I/O redirect special judge.
Args:
src_path (string): Executable source path.
exe_relpath (string): Executable or interpreter path in the sandbox.
argv ([string]): List of arguments.
envp ([string]): List of environment variables.
check_ugid (int, int): Check UID/GID.
test_ugid (int, int): Test UID/GID.
test_relpath (string): Test relative path.
test_param (dict): Test parameters.
metadata (dict): Metadata.
callback (function): Callback of return_future.
Returns:
None
'''
def _check_started_cb(task_id):
'''Check started callback.
Close unused file descriptors after the check is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal inpipe_fd
nonlocal outpipe_fd
nonlocal ansfile_fd
nonlocal check_infile_fd
os.close(inpipe_fd[1])
os.close(outpipe_fd[0])
if ansfile_fd is not None:
os.close(ansfile_fd)
if check_infile_fd is not None:
os.close(check_infile_fd)
def _test_started_cb(task_id):
'''Test started callback.
Close unused file descriptors after the test is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal inpipe_fd
nonlocal outpipe_fd
nonlocal outfile_fd
nonlocal test_infile_fd
os.close(inpipe_fd[0])
os.close(outpipe_fd[1])
os.close(outfile_fd)
if test_infile_fd is not None:
os.close(test_infile_fd)
def _done_cb():
'''Done callback.'''
nonlocal result_stat
nonlocal result_pass
nonlocal verdict_path
if result_pass is not None and result_stat is not None:
with StackContext(Privilege.fileaccess):
verfile = open(verdict_path, 'r')
verdict = verfile.read(140)
verfile.close()
callback((result_pass, result_stat, verdict))
return
def _check_done_cb(task_id, stat):
'''Check done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal result_pass
if stat['detect_error'] == PyExt.DETECT_NONE:
result_pass = True
else:
result_pass = False
_done_cb()
def _test_done_cb(task_id, stat):
'''Test done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal result_stat
result_stat = (stat['utime'], stat['peakmem'], stat['detect_error'])
_done_cb()
result_stat = None
result_pass = None
in_path = test_param['in']
ans_path = test_param['ans']
timelimit = test_param['timelimit']
memlimit = test_param['memlimit']
check_uid, check_gid = check_ugid
test_uid, test_gid = test_ugid
test_path = self.container_path + test_relpath
output_relpath = test_relpath + '/output.txt'
output_path = self.container_path + output_relpath
verdict_relpath = test_relpath + '/verdict.txt'
verdict_path = self.container_path + verdict_relpath
# Prepare test environment.
with StackContext(Privilege.fileaccess):
os.mkdir(test_path, mode=0o771)
shutil.copyfile(src_path, test_path + '/a.out', \
follow_symlinks=False)
with StackContext(Privilege.fullaccess):
os.chown(test_path + '/a.out', test_uid, test_gid)
os.chmod(test_path + '/a.out', 0o500)
# Prepare I/O.
with StackContext(Privilege.fileaccess):
try:
check_infile_fd = os.open(in_path, os.O_RDONLY | os.O_CLOEXEC)
test_infile_fd = os.open(in_path, os.O_RDONLY | os.O_CLOEXEC)
except (FileNotFoundError, TypeError):
check_infile_fd = None
test_infile_fd = None
try:
ansfile_fd = os.open(ans_path, os.O_RDONLY | os.O_CLOEXEC)
except (FileNotFoundError, TypeError):
ansfile_fd = None
outfile_fd = os.open(output_path, \
os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC, mode=0o400)
os.close(os.open(verdict_path,
os.O_CREAT | os.O_CLOEXEC, mode=0o640))
with StackContext(Privilege.fullaccess):
os.chown(output_path, check_uid, check_gid)
os.chown(verdict_path, check_uid, check_gid)
inpipe_fd = os.pipe2(os.O_CLOEXEC)
outpipe_fd = os.pipe2(os.O_CLOEXEC)
# Set file descriptor mapping.
check_fdmap = {
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}
test_fdmap = {
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}
if check_infile_fd is not None:
check_fdmap[metadata['redir_check']['testin']] = check_infile_fd
if ansfile_fd is not None:
check_fdmap[metadata['redir_check']['ansin']] = ansfile_fd
check_fdmap[metadata['redir_check']['pipein']] = inpipe_fd[1]
check_fdmap[metadata['redir_check']['pipeout']] = outpipe_fd[0]
try:
del check_fdmap[-1]
except KeyError:
pass
if test_infile_fd is not None:
test_fdmap[metadata['redir_test']['testin']] = test_infile_fd
test_fdmap[metadata['redir_test']['testout']] = outfile_fd
test_fdmap[metadata['redir_test']['pipein']] = inpipe_fd[0]
test_fdmap[metadata['redir_test']['pipeout']] = outpipe_fd[1]
try:
del test_fdmap[-1]
except KeyError:
pass
check_task_id = PyExt.create_task(self.build_relpath + '/check', \
[], \
[
'PATH=/usr/bin:/bin',
'HOME=%s'%self.build_relpath,
'LANG=en_US.UTF-8',
'OUTPUT=%s'%output_relpath,
'VERDICT=%s'%verdict_relpath,
], \
check_fdmap, \
self.build_relpath, self.container_path, \
check_uid, check_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if check_task_id is None:
callback((False, (0, 0, PyExt.DETECT_INTERNALERR), ''))
return
PyExt.start_task(check_task_id, _check_done_cb, _check_started_cb)
test_task_id = PyExt.create_task(exe_relpath, argv, envp, \
test_fdmap, \
test_relpath, self.container_path, \
test_uid, test_gid, timelimit, memlimit, \
PyExt.RESTRICT_LEVEL_HIGH)
if test_task_id is None:
callback((False, (0, 0, PyExt.DETECT_INTERNALERR), ''))
return
PyExt.start_task(test_task_id, _test_done_cb, _test_started_cb)
avg_line_length: 31.179724 · max_line_length: 80 · alphanum_fraction: 0.532131
content_no_comment: same file as the content above with comments and docstrings stripped (omitted; duplicates the content and is truncated mid-file in this dump)
else:
PyExt.start_task(task_id, _done_cb)
@concurrent.return_future
def comp_python(self, callback=None):
def _started_cb(task_id):
nonlocal errpipe_fd
os.close(errpipe_fd)
def _done_cb(task_id, stat):
nonlocal compile_path
with StackContext(Privilege.fileaccess):
verfile = open(compile_path + '/verdict.txt', 'rb')
verdict = ''.join(chr(c) for c in verfile.read(140))
verfile.close()
callback((stat['detect_error'], verdict))
compile_path = self.chal_path + '/compile'
with StackContext(Privilege.fileaccess):
os.mkdir(compile_path, mode=0o770)
shutil.copyfile(self.code_path, compile_path + '/test.py', \
follow_symlinks=False)
FileUtils.setperm(compile_path, self.compile_uid, self.compile_gid)
with StackContext(Privilege.fileaccess):
errpipe_fd = os.open(compile_path + '/verdict.txt', \
os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC, mode=0o440)
task_id = PyExt.create_task('/usr/bin/python3.5', \
[
'-m',
'py_compile',
'./test.py'
], \
[
'HOME=/home/%d/compile'%self.uniqid,
'LANG=en_US.UTF-8'
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: errpipe_fd,
}, \
'/home/%d/compile'%self.uniqid, 'container/standard', \
self.compile_uid, self.compile_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
os.close(errpipe_fd)
callback((PyExt.DETECT_INTERNALERR, ''))
return
PyExt.start_task(task_id, _done_cb, _started_cb)
@concurrent.return_future
def judge_diff(self, src_path, exe_path, argv, envp, in_path, ans_path, \
timelimit, memlimit, callback=None):
def _started_cb(task_id):
nonlocal infile_fd
nonlocal outpipe_fd
os.close(infile_fd)
os.close(outpipe_fd[1])
IOLoop.instance().add_handler(outpipe_fd[0], _diff_out, \
IOLoop.READ | IOLoop.ERROR)
def _done_cb(task_id, stat):
nonlocal result_stat
nonlocal result_pass
result_stat = (stat['utime'], stat['peakmem'], stat['detect_error'])
if result_pass is not None:
callback((result_pass, result_stat, ''))
def _diff_out(evfd, events):
nonlocal outpipe_fd
nonlocal ansfile
nonlocal result_stat
nonlocal result_pass
end_flag = False
if events & IOLoop.READ:
while True:
try:
data = os.read(outpipe_fd[0], 65536)
except BlockingIOError:
break
ansdata = ansfile.read(len(data))
if data != ansdata:
result_pass = False
end_flag = True
break
if len(ansdata) == 0:
if len(ansfile.read(1)) == 0:
result_pass = True
else:
result_pass = False
end_flag = True
break
if (events & IOLoop.ERROR) or end_flag:
if result_pass is None:
if len(ansfile.read(1)) == 0:
result_pass = True
else:
result_pass = False
IOLoop.instance().remove_handler(evfd)
os.close(outpipe_fd[0])
ansfile.close()
if result_stat is not None:
callback((result_pass, result_stat, ''))
judge_uid, judge_gid = StdChal.get_restrict_ugid()
with StackContext(Privilege.fileaccess):
infile_fd = os.open(in_path, os.O_RDONLY | os.O_CLOEXEC)
ansfile = open(ans_path, 'rb')
outpipe_fd = os.pipe2(os.O_CLOEXEC)
fcntl.fcntl(outpipe_fd[0], fcntl.F_SETFL, os.O_NONBLOCK)
result_stat = None
result_pass = None
with StackContext(Privilege.fileaccess):
judge_path = self.chal_path + '/run_%d'%judge_uid
os.mkdir(judge_path, mode=0o771)
shutil.copyfile(src_path, judge_path + '/a.out', \
follow_symlinks=False)
with StackContext(Privilege.fullaccess):
os.chown(judge_path + '/a.out', judge_uid, judge_gid)
os.chmod(judge_path + '/a.out', 0o500)
task_id = PyExt.create_task(exe_path, argv, envp, \
{
0: infile_fd,
1: outpipe_fd[1],
2: outpipe_fd[1],
}, \
'/home/%d/run_%d'%(self.uniqid, judge_uid), 'container/standard', \
judge_uid, judge_gid, timelimit, memlimit, \
PyExt.RESTRICT_LEVEL_HIGH)
if task_id is None:
os.close(infile_fd)
os.close(outpipe_fd[0])
os.close(outpipe_fd[1])
ansfile.close()
callback((False, (0, 0, PyExt.DETECT_INTERNALERR), ''))
else:
PyExt.start_task(task_id, _done_cb, _started_cb)
class IORedirJudge:
def __init__(self, container_path, build_relpath):
self.container_path = container_path
self.build_relpath = build_relpath
self.build_path = container_path + build_relpath
@concurrent.return_future
def build(self, build_ugid, res_path, callback=None):
def _done_cb(task_id, stat):
if stat['detect_error'] == PyExt.DETECT_NONE:
callback(True)
else:
callback(False)
build_uid, build_gid = build_ugid
FileUtils.copydir(res_path + '/check', self.build_path)
FileUtils.setperm(self.build_path, build_uid, build_gid)
with StackContext(Privilege.fullaccess):
os.chmod(self.build_path, mode=0o770)
with StackContext(Privilege.fileaccess):
if not os.path.isfile(self.build_path + '/build'):
callback(True)
return
with StackContext(Privilege.fullaccess):
os.chmod(self.build_path + '/build', mode=0o770)
task_id = PyExt.create_task(self.build_relpath + '/build', \
[], \
[
'PATH=/usr/bin:/bin',
'TMPDIR=%s'%self.build_relpath,
'HOME=%s'%self.build_relpath,
'LANG=en_US.UTF-8'
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}, \
self.build_relpath, 'container/standard', \
build_uid, build_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
callback(False)
else:
PyExt.start_task(task_id, _done_cb)
@concurrent.return_future
def judge(self, src_path, exe_relpath, argv, envp, check_ugid, test_ugid, \
test_relpath, test_param, metadata, callback=None):
def _check_started_cb(task_id):
nonlocal inpipe_fd
nonlocal outpipe_fd
nonlocal ansfile_fd
nonlocal check_infile_fd
os.close(inpipe_fd[1])
os.close(outpipe_fd[0])
if ansfile_fd is not None:
os.close(ansfile_fd)
if check_infile_fd is not None:
os.close(check_infile_fd)
def _test_started_cb(task_id):
nonlocal inpipe_fd
nonlocal outpipe_fd
nonlocal outfile_fd
nonlocal test_infile_fd
os.close(inpipe_fd[0])
os.close(outpipe_fd[1])
os.close(outfile_fd)
if test_infile_fd is not None:
os.close(test_infile_fd)
def _done_cb():
nonlocal result_stat
nonlocal result_pass
nonlocal verdict_path
if result_pass is not None and result_stat is not None:
with StackContext(Privilege.fileaccess):
verfile = open(verdict_path, 'r')
verdict = verfile.read(140)
verfile.close()
callback((result_pass, result_stat, verdict))
return
def _check_done_cb(task_id, stat):
nonlocal result_pass
if stat['detect_error'] == PyExt.DETECT_NONE:
result_pass = True
else:
result_pass = False
_done_cb()
def _test_done_cb(task_id, stat):
nonlocal result_stat
result_stat = (stat['utime'], stat['peakmem'], stat['detect_error'])
_done_cb()
result_stat = None
result_pass = None
in_path = test_param['in']
ans_path = test_param['ans']
timelimit = test_param['timelimit']
memlimit = test_param['memlimit']
check_uid, check_gid = check_ugid
test_uid, test_gid = test_ugid
test_path = self.container_path + test_relpath
output_relpath = test_relpath + '/output.txt'
output_path = self.container_path + output_relpath
verdict_relpath = test_relpath + '/verdict.txt'
verdict_path = self.container_path + verdict_relpath
with StackContext(Privilege.fileaccess):
os.mkdir(test_path, mode=0o771)
shutil.copyfile(src_path, test_path + '/a.out', \
follow_symlinks=False)
with StackContext(Privilege.fullaccess):
os.chown(test_path + '/a.out', test_uid, test_gid)
os.chmod(test_path + '/a.out', 0o500)
with StackContext(Privilege.fileaccess):
try:
check_infile_fd = os.open(in_path, os.O_RDONLY | os.O_CLOEXEC)
test_infile_fd = os.open(in_path, os.O_RDONLY | os.O_CLOEXEC)
except (FileNotFoundError, TypeError):
check_infile_fd = None
test_infile_fd = None
try:
ansfile_fd = os.open(ans_path, os.O_RDONLY | os.O_CLOEXEC)
except (FileNotFoundError, TypeError):
ansfile_fd = None
outfile_fd = os.open(output_path, \
os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC, mode=0o400)
os.close(os.open(verdict_path,
os.O_CREAT | os.O_CLOEXEC, mode=0o640))
with StackContext(Privilege.fullaccess):
os.chown(output_path, check_uid, check_gid)
os.chown(verdict_path, check_uid, check_gid)
inpipe_fd = os.pipe2(os.O_CLOEXEC)
outpipe_fd = os.pipe2(os.O_CLOEXEC)
check_fdmap = {
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}
test_fdmap = {
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}
if check_infile_fd is not None:
check_fdmap[metadata['redir_check']['testin']] = check_infile_fd
if ansfile_fd is not None:
check_fdmap[metadata['redir_check']['ansin']] = ansfile_fd
check_fdmap[metadata['redir_check']['pipein']] = inpipe_fd[1]
check_fdmap[metadata['redir_check']['pipeout']] = outpipe_fd[0]
try:
del check_fdmap[-1]
except KeyError:
pass
if test_infile_fd is not None:
test_fdmap[metadata['redir_test']['testin']] = test_infile_fd
test_fdmap[metadata['redir_test']['testout']] = outfile_fd
test_fdmap[metadata['redir_test']['pipein']] = inpipe_fd[0]
test_fdmap[metadata['redir_test']['pipeout']] = outpipe_fd[1]
try:
del test_fdmap[-1]
except KeyError:
pass
check_task_id = PyExt.create_task(self.build_relpath + '/check', \
[], \
[
'PATH=/usr/bin:/bin',
'HOME=%s'%self.build_relpath,
'LANG=en_US.UTF-8',
'OUTPUT=%s'%output_relpath,
'VERDICT=%s'%verdict_relpath,
], \
check_fdmap, \
self.build_relpath, self.container_path, \
check_uid, check_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if check_task_id is None:
callback((False, (0, 0, PyExt.DETECT_INTERNALERR), ''))
return
PyExt.start_task(check_task_id, _check_done_cb, _check_started_cb)
test_task_id = PyExt.create_task(exe_relpath, argv, envp, \
test_fdmap, \
test_relpath, self.container_path, \
test_uid, test_gid, timelimit, memlimit, \
PyExt.RESTRICT_LEVEL_HIGH)
if test_task_id is None:
callback((False, (0, 0, PyExt.DETECT_INTERNALERR), ''))
return
PyExt.start_task(test_task_id, _test_done_cb, _test_started_cb)
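# The judge_diff flow above can be summarized without the PyExt sandbox, privilege
# switching, or pipe plumbing: run the contestant program, feed it the input file,
# and compare its stdout byte-for-byte with the answer file. A minimal standalone
# sketch of that idea (paths and the time limit are illustrative placeholders only):
import subprocess

def simple_diff_judge(exe_path, in_path, ans_path, timeout_sec=10):
    """Return True if the program's stdout matches the answer file exactly."""
    with open(in_path, 'rb') as infile:
        try:
            proc = subprocess.run([exe_path], stdin=infile,
                                  stdout=subprocess.PIPE, timeout=timeout_sec)
        except subprocess.TimeoutExpired:
            return False
    if proc.returncode != 0:
        return False
    with open(ans_path, 'rb') as ansfile:
        return proc.stdout == ansfile.read()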
| true
| true
|
f70546cdac654426bc6a1c1eda3b202298692492
| 170
|
py
|
Python
|
Python/CodeForces Solutions/1-500/337A.py
|
7namansharma/Comp-Prog
|
b760ef9b4173e6d5851dc63cc92a8e935baf60ed
|
[
"MIT"
] | null | null | null |
Python/CodeForces Solutions/1-500/337A.py
|
7namansharma/Comp-Prog
|
b760ef9b4173e6d5851dc63cc92a8e935baf60ed
|
[
"MIT"
] | null | null | null |
Python/CodeForces Solutions/1-500/337A.py
|
7namansharma/Comp-Prog
|
b760ef9b4173e6d5851dc63cc92a8e935baf60ed
|
[
"MIT"
] | null | null | null |
n, m = map(int, input().split())
l = list(map(int, input().split()))
l.sort()
mini = l[m-1] - l[0]
for i in range(m-n+1):
mini = min(mini, l[i+n-1]-l[i])
print(mini)
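# The solution above sorts the m piece counts and slides a window of size n over the
# sorted list; the answer is the smallest (largest - smallest) difference seen in any
# window. A small self-checking restatement of that idea:
def min_difference(n, values):
    values = sorted(values)
    return min(values[i + n - 1] - values[i] for i in range(len(values) - n + 1))

# e.g. choosing n=4 puzzles from piece counts [10, 12, 10, 7, 5, 22] gives a best
# spread of 5 (the window 7, 10, 10, 12).
assert min_difference(4, [10, 12, 10, 7, 5, 22]) == 5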
| 21.25
| 35
| 0.547059
|
n, m = map(int, input().split())
l = list(map(int, input().split()))
l.sort()
mini = l[m-1] - l[0]
for i in range(m-n+1):
mini = min(mini, l[i+n-1]-l[i])
print(mini)
| true
| true
|
f705476175ce0e41b026038bb632673a3821a6ce
| 13,531
|
py
|
Python
|
ivy/functional/ivy/linear_algebra.py
|
Neel-Renavikar/ivy
|
644ab189a3a3fc52b1f3f86563226106e549eea3
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/ivy/linear_algebra.py
|
Neel-Renavikar/ivy
|
644ab189a3a3fc52b1f3f86563226106e549eea3
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/ivy/linear_algebra.py
|
Neel-Renavikar/ivy
|
644ab189a3a3fc52b1f3f86563226106e549eea3
|
[
"Apache-2.0"
] | null | null | null |
# global
from typing import Union, Optional, Tuple, Literal
from collections import namedtuple
# local
import ivy
from ivy.framework_handler import current_framework as _cur_framework
inf = float('inf')
# Array API Standard #
# -------------------#
def matrix_transpose(x: Union[ivy.Array, ivy.NativeArray])\
-> ivy.Array:
"""
Transposes a matrix (or a stack of matrices) ``x``.
Parameters
----------
x: array
input array having shape ``(..., M, N)`` and whose innermost two dimensions form ``MxN`` matrices.
Returns
-------
out: array
an array containing the transpose for each matrix and having shape ``(..., N, M)``. The returned array must have the same data type as ``x``.
"""
return _cur_framework(x).matrix_transpose(x)
# noinspection PyShadowingBuiltins
def vector_norm(x: Union[ivy.Array, ivy.NativeArray],
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, -inf]] = 2)\
-> ivy.Array:
"""
Computes the vector norm of a vector (or batch of vectors) ``x``.
Parameters
----------
x:
input array. Should have a floating-point data type.
axis:
If an integer, ``axis`` specifies the axis (dimension) along which to compute vector norms. If an n-tuple, ``axis`` specifies the axes (dimensions) along which to compute batched vector norms. If ``None``, the vector norm must be computed over all array values (i.e., equivalent to computing the vector norm of a flattened array). Negative indices must be supported. Default: ``None``.
keepdims:
If ``True``, the axes (dimensions) specified by ``axis`` must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the axes (dimensions) specified by ``axis`` must not be included in the result. Default: ``False``.
ord:
order of the norm. The following mathematical norms must be supported:
+------------------+----------------------------+
| ord | description |
+==================+============================+
| 1 | L1-norm (Manhattan) |
+------------------+----------------------------+
| 2 | L2-norm (Euclidean) |
+------------------+----------------------------+
| inf | infinity norm |
+------------------+----------------------------+
| (int,float >= 1) | p-norm |
+------------------+----------------------------+
The following non-mathematical "norms" must be supported:
+------------------+--------------------------------+
| ord | description |
+==================+================================+
| 0 | sum(a != 0) |
+------------------+--------------------------------+
| -1 | 1./sum(1./abs(a)) |
+------------------+--------------------------------+
| -2 | 1./sqrt(sum(1./abs(a)\*\*2)) |
+------------------+--------------------------------+
| -inf | min(abs(a)) |
+------------------+--------------------------------+
| (int,float < 1) | sum(abs(a)\*\*ord)\*\*(1./ord) |
+------------------+--------------------------------+
Default: ``2``.
Returns
-------
out:
an array containing the vector norms. If ``axis`` is ``None``, the returned array must be a zero-dimensional array containing a vector norm. If ``axis`` is a scalar value (``int`` or ``float``), the returned array must have a rank which is one less than the rank of ``x``. If ``axis`` is a ``n``-tuple, the returned array must have a rank which is ``n`` less than the rank of ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
"""
if ord == -float('inf'):
return ivy.reduce_min(ivy.abs(x), axis, keepdims)
elif ord == float('inf'):
return ivy.reduce_max(ivy.abs(x), axis, keepdims)
elif ord == 0:
return ivy.reduce_sum(ivy.cast(x != 0, 'float32'), axis, keepdims)
x_raised = x ** ord
return ivy.reduce_sum(x_raised, axis, keepdims) ** (1/ord)
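# A small numpy-only sanity check of the norm definitions tabulated in the docstring
# above; it restates the math directly rather than exercising an ivy backend.
def _norm_definitions_demo():
    import numpy as np
    v = np.array([3.0, -4.0, 0.0])
    assert np.sum(np.abs(v)) == 7.0         # ord=1  (L1 / Manhattan)
    assert np.sqrt(np.sum(v ** 2)) == 5.0   # ord=2  (L2 / Euclidean)
    assert np.max(np.abs(v)) == 4.0         # ord=inf (infinity norm)
    assert np.count_nonzero(v) == 2         # ord=0  (count of non-zero entries)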
def svd(x:Union[ivy.Array,ivy.NativeArray],full_matrices: bool = True)->Union[ivy.Array, Tuple[ivy.Array,...]]:
"""
Singular Value Decomposition.
When x is a 2D array, it is factorized as u @ numpy.diag(s) @ vh = (u * s) @ vh, where u and vh are 2D unitary
arrays and s is a 1D array of x's singular values. When x is higher-dimensional, SVD is applied in batched mode.
:param x: Input array with number of dimensions >= 2.
:type x: array
:return:
u -> { (…, M, M), (…, M, K) } array \n
Unitary array(s). The first (number of dims - 2) dimensions have the same size as those of the input x.
The size of the last two dimensions depends on the value of full_matrices.
s -> (…, K) array \n
Vector(s) with the singular values, within each vector sorted in descending order.
The first (number of dims - 2) dimensions have the same size as those of the input x.
vh -> { (…, N, N), (…, K, N) } array \n
Unitary array(s). The first (number of dims - 2) dimensions have the same size as those of the input x.
The size of the last two dimensions depends on the value of full_matrices.
"""
return _cur_framework(x).svd(x,full_matrices)
def diagonal(x: ivy.Array,
offset: int = 0,
axis1: int = -2,
axis2: int = -1) -> ivy.Array:
"""
Returns the specified diagonals of a matrix (or a stack of matrices) ``x``.
Parameters
----------
x:
input array having shape ``(..., M, N)`` and whose innermost two dimensions form ``MxN`` matrices.
offset:
offset specifying the off-diagonal relative to the main diagonal.
- ``offset = 0``: the main diagonal.
- ``offset > 0``: off-diagonal above the main diagonal.
- ``offset < 0``: off-diagonal below the main diagonal.
Default: `0`.
axis1:
axis to be used as the first axis of the 2-D sub-arrays from which the diagonals should be taken.
Defaults to the second-to-last axis (-2).
axis2:
axis to be used as the second axis of the 2-D sub-arrays from which the diagonals should be taken.
Defaults to the last axis (-1).
Returns
-------
out:
an array containing the diagonals and whose shape is determined by removing the last two dimensions and appending a dimension equal to the size of the resulting diagonals. The returned array must have the same data type as ``x``.
"""
return _cur_framework(x).diagonal(x, offset, axis1=axis1, axis2=axis2)
def inv(x):
"""
Computes the (multiplicative) inverse of x matrix.
Given a square matrix x, returns the matrix x_inv satisfying dot(x, x_inv) = dot(x_inv, x) = eye(x.shape[0]).
:param x: Matrix to be inverted.
:type x: array
:return: (Multiplicative) inverse of the matrix x.
"""
return _cur_framework(x).inv(x)
def pinv(x):
"""
Computes the pseudo inverse of x matrix.
:param x: Matrix to be pseudo inverted.
:type x: array
:return: pseudo inverse of the matrix x.
"""
return _cur_framework(x).pinv(x)
def qr(x: ivy.Array,
mode: str = 'reduced') -> namedtuple('qr', ['Q', 'R']):
"""
Returns the qr decomposition x = QR of a full column rank matrix (or a stack of matrices), where Q is an orthonormal matrix (or a stack of matrices) and R is an upper-triangular matrix (or a stack of matrices).
Parameters
----------
x:
input array having shape (..., M, N) and whose innermost two dimensions form MxN matrices of rank N. Should have a floating-point data type.
mode:
decomposition mode. Should be one of the following modes:
- 'reduced': compute only the leading K columns of q, such that q and r have dimensions (..., M, K) and (..., K, N), respectively, and where K = min(M, N).
- 'complete': compute q and r with dimensions (..., M, M) and (..., M, N), respectively.
Default: 'reduced'.
Returns
-------
out:
a namedtuple (Q, R) whose
- first element must have the field name Q and must be an array whose shape depends on the value of mode and contain matrices with orthonormal columns. If mode is 'complete', the array must have shape (..., M, M). If mode is 'reduced', the array must have shape (..., M, K), where K = min(M, N). The first x.ndim-2 dimensions must have the same size as those of the input array x.
- second element must have the field name R and must be an array whose shape depends on the value of mode and contain upper-triangular matrices. If mode is 'complete', the array must have shape (..., M, N). If mode is 'reduced', the array must have shape (..., K, N), where K = min(M, N). The first x.ndim-2 dimensions must have the same size as those of the input x.
"""
return _cur_framework(x).qr(x, mode)
def matmul(x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray]) -> ivy.Array:
"""
Computes the matrix product.
Parameters
----------
x1:
x1 (array) – first input array. Should have a numeric data type. Must have at least one dimension.
x2:
x2 (array) – second input array. Should have a numeric data type. Must have at least one dimension.
Returns
-------
out(array):
if both x1 and x2 are one-dimensional arrays having shape (N,), a zero-dimensional array containing the inner product as its only element.
if x1 is a two-dimensional array having shape (M, K) and x2 is a two-dimensional array having shape (K, N), a two-dimensional array containing the conventional matrix product and having shape (M, N).
if x1 is a one-dimensional array having shape (K,) and x2 is an array having shape (..., K, N), an array having shape (..., N) (i.e., prepended dimensions during vector-to-matrix promotion must be removed) and containing the conventional matrix product.
if x1 is an array having shape (..., M, K) and x2 is a one-dimensional array having shape (K,), an array having shape (..., M) (i.e., appended dimensions during vector-to-matrix promotion must be removed) and containing the conventional matrix product.
if x1 is a two-dimensional array having shape (M, K) and x2 is an array having shape (..., K, N), an array having shape (..., M, N) and containing the conventional matrix product for each stacked matrix.
if x1 is an array having shape (..., M, K) and x2 is a two-dimensional array having shape (K, N), an array having shape (..., M, N) and containing the conventional matrix product for each stacked matrix.
if either x1 or x2 has more than two dimensions, an array having a shape determined by Broadcasting shape(x1)[:-2] against shape(x2)[:-2] and containing the conventional matrix product for each stacked matrix.
Raises
------
if either x1 or x2 is a zero-dimensional array.
if x1 is a one-dimensional array having shape (K,), x2 is a one-dimensional array having shape (L,), and K != L.
if x1 is a one-dimensional array having shape (K,), x2 is an array having shape (..., L, N), and K != L.
if x1 is an array having shape (..., M, K), x2 is a one-dimensional array having shape (L,), and K != L.
if x1 is an array having shape (..., M, K), x2 is an array having shape (..., L, N), and K != L.
"""
return _cur_framework(x1).matmul(x1, x2)
def slodget(x: Union[ivy.Array, ivy.NativeArray],) \
-> ivy.Array:
"""
Computes the sign and natural logarithm of the determinant of an array.
Parameters
----------
x:
This is a 2D array, and it has to be square
Return
----------
Out:
This function returns two values -
sign:
A number representing the sign of the determinant.
logdet:
The natural log of the absolute value of the determinant.
"""
return _cur_framework(x).slodget(x)
def svdvals(x: Union[ivy.Array, ivy.NativeArray],) \
-> ivy.Array:
"""
Returns the singular values of a matrix (or a stack of matrices) ``x``.
Parameters
----------
x:
input array having shape ``(..., M, N)`` and whose innermost two dimensions form ``MxN`` matrices.
Return
----------
Out:
array with shape ``(..., K)`` that contains the vector(s) of singular values of length ``K``, where K = min(M, N).
The values are sorted in descending order by magnitude.
"""
return _cur_framework(x).svdvals(x)
def trace(x: ivy.Array,
offset: int = 0)\
-> ivy.Array:
"""
Computes the sum of the diagonal of an array.
Parameters
----------
x:
This is an array.
Return
----------
Out:
sum:
The sum of the diagonal elements of ``x``, taken along the diagonal specified by ``offset``.
"""
return _cur_framework(x).trace(x, offset)
# Extra #
# ------#
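# A numpy sketch of the stacked-matrix shape conventions described in the docstrings
# above (matrix_transpose, diagonal and trace applied to a batch of matrices). It only
# illustrates the expected shapes and does not call the ivy wrappers themselves.
def _stacked_matrix_shape_demo():
    import numpy as np
    x = np.arange(2 * 3 * 4, dtype=np.float64).reshape(2, 3, 4)   # (..., M, N) = (2, 3, 4)
    assert np.swapaxes(x, -1, -2).shape == (2, 4, 3)              # transpose -> (..., N, M)
    assert np.diagonal(x, axis1=-2, axis2=-1).shape == (2, 3)     # diagonal -> (..., min(M, N))
    assert np.trace(x, axis1=-2, axis2=-1).shape == (2,)          # trace -> one sum per matrix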
| 44.953488
| 483
| 0.578302
|
from typing import Union, Optional, Tuple, Literal
from collections import namedtuple
import ivy
from ivy.framework_handler import current_framework as _cur_framework
inf = float('inf')
def matrix_transpose(x: Union[ivy.Array, ivy.NativeArray])\
-> ivy.Array:
return _cur_framework(x).matrix_transpose(x)
def vector_norm(x: Union[ivy.Array, ivy.NativeArray],
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, -inf]] = 2)\
-> ivy.Array:
if ord == -float('inf'):
return ivy.reduce_min(ivy.abs(x), axis, keepdims)
elif ord == float('inf'):
return ivy.reduce_max(ivy.abs(x), axis, keepdims)
elif ord == 0:
return ivy.reduce_sum(ivy.cast(x != 0, 'float32'), axis, keepdims)
x_raised = x ** ord
return ivy.reduce_sum(x_raised, axis, keepdims) ** (1/ord)
def svd(x:Union[ivy.Array,ivy.NativeArray],full_matrices: bool = True)->Union[ivy.Array, Tuple[ivy.Array,...]]:
return _cur_framework(x).svd(x,full_matrices)
def diagonal(x: ivy.Array,
offset: int = 0,
axis1: int = -2,
axis2: int = -1) -> ivy.Array:
return _cur_framework(x).diagonal(x, offset, axis1=axis1, axis2=axis2)
def inv(x):
return _cur_framework(x).inv(x)
def pinv(x):
return _cur_framework(x).pinv(x)
def qr(x: ivy.Array,
mode: str = 'reduced') -> namedtuple('qr', ['Q', 'R']):
return _cur_framework(x).qr(x, mode)
def matmul(x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray]) -> ivy.Array:
return _cur_framework(x1).matmul(x1, x2)
def slodget(x: Union[ivy.Array, ivy.NativeArray],) \
-> ivy.Array:
return _cur_framework(x).slodget(x)
def svdvals(x: Union[ivy.Array, ivy.NativeArray],) \
-> ivy.Array:
return _cur_framework(x).svdvals(x)
def trace(x: ivy.Array,
offset: int = 0)\
-> ivy.Array:
return _cur_framework(x).trace(x, offset)
| true
| true
|
f70547e8199382ef401382f89593188a5674649a
| 1,195
|
py
|
Python
|
backend/surfsara/models/task.py
|
sara-nl/data-exchange
|
52b9c2554a52b56686f3a06f583a7a6454bf6df6
|
[
"Apache-2.0"
] | 4
|
2020-12-03T14:13:29.000Z
|
2021-04-19T03:03:19.000Z
|
backend/surfsara/models/task.py
|
sara-nl/data-exchange
|
52b9c2554a52b56686f3a06f583a7a6454bf6df6
|
[
"Apache-2.0"
] | null | null | null |
backend/surfsara/models/task.py
|
sara-nl/data-exchange
|
52b9c2554a52b56686f3a06f583a7a6454bf6df6
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.postgres.fields import JSONField
from django.db import models
from surfsara.models.permission import Permission
class Task(models.Model):
RUNNING = "running"
SUCCESS = "success"
ERROR = "error"
OUTPUT_RELEASED = "output_released"
RELEASE_REJECTED = "release_rejected"
TASK_STATES = (
(RUNNING, "Running"),
(SUCCESS, "Success"),
(ERROR, "Error"),
(OUTPUT_RELEASED, "Output Released"),
(RELEASE_REJECTED, "Release Rejected"),
)
id = models.AutoField(primary_key=True)
state = models.CharField(max_length=255, choices=TASK_STATES)
progress_state = JSONField(null=True)
author_email = models.EmailField()
approver_email = models.EmailField()
algorithm = models.TextField()
algorithm_storage = models.TextField()
dataset = models.TextField()
dataset_storage = models.TextField()
output = models.TextField(null=True)
review_output = models.BooleanField(default=True)
permission = models.ForeignKey(Permission, null=True, on_delete=models.SET_NULL)
registered_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
| 34.142857
| 84
| 0.711297
|
from django.contrib.postgres.fields import JSONField
from django.db import models
from surfsara.models.permission import Permission
class Task(models.Model):
RUNNING = "running"
SUCCESS = "success"
ERROR = "error"
OUTPUT_RELEASED = "output_released"
RELEASE_REJECTED = "release_rejected"
TASK_STATES = (
(RUNNING, "Running"),
(SUCCESS, "Success"),
(ERROR, "Error"),
(OUTPUT_RELEASED, "Output Released"),
(RELEASE_REJECTED, "Release Rejected"),
)
id = models.AutoField(primary_key=True)
state = models.CharField(max_length=255, choices=TASK_STATES)
progress_state = JSONField(null=True)
author_email = models.EmailField()
approver_email = models.EmailField()
algorithm = models.TextField()
algorithm_storage = models.TextField()
dataset = models.TextField()
dataset_storage = models.TextField()
output = models.TextField(null=True)
review_output = models.BooleanField(default=True)
permission = models.ForeignKey(Permission, null=True, on_delete=models.SET_NULL)
registered_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
| true
| true
|
f705482cb111e6ec8cc98d518959f511b2ffb01f
| 4,422
|
py
|
Python
|
AtomicASTChangeMining/src/test/resources/ASTConversion/sklearn/utils/tests/test_seq_dataset.py
|
maldil/CPATMiner2.0
|
743aa8a5b638a1963e621f59f63d794728ab0c79
|
[
"Apache-2.0"
] | 4
|
2021-11-04T02:47:31.000Z
|
2022-01-25T02:04:05.000Z
|
AtomicASTChangeMining/src/test/resources/ASTConversion/sklearn/utils/tests/test_seq_dataset.py
|
maldil/R-CPATMiner
|
88b96a5af438a9c2ea2dab351cb8b210119132a2
|
[
"Apache-2.0"
] | null | null | null |
AtomicASTChangeMining/src/test/resources/ASTConversion/sklearn/utils/tests/test_seq_dataset.py
|
maldil/R-CPATMiner
|
88b96a5af438a9c2ea2dab351cb8b210119132a2
|
[
"Apache-2.0"
] | 1
|
2021-09-11T06:52:39.000Z
|
2021-09-11T06:52:39.000Z
|
# Author: Tom Dupre la Tour
# Joan Massich <mailsik@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import pytest
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils._seq_dataset import (
ArrayDataset32, ArrayDataset64, CSRDataset32, CSRDataset64)
from sklearn.datasets import load_iris
from sklearn.utils._testing import assert_allclose
iris = load_iris()
X64 = iris.data.astype(np.float64)
y64 = iris.target.astype(np.float64)
X_csr64 = sp.csr_matrix(X64)
sample_weight64 = np.arange(y64.size, dtype=np.float64)
X32 = iris.data.astype(np.float32)
y32 = iris.target.astype(np.float32)
X_csr32 = sp.csr_matrix(X32)
sample_weight32 = np.arange(y32.size, dtype=np.float32)
def assert_csr_equal_values(current, expected):
current.eliminate_zeros()
expected.eliminate_zeros()
expected = expected.astype(current.dtype)
assert current.shape[0] == expected.shape[0]
assert current.shape[1] == expected.shape[1]
assert_array_equal(current.data, expected.data)
assert_array_equal(current.indices, expected.indices)
assert_array_equal(current.indptr, expected.indptr)
def make_dense_dataset_32():
return ArrayDataset32(X32, y32, sample_weight32, seed=42)
def make_dense_dataset_64():
return ArrayDataset64(X64, y64, sample_weight64, seed=42)
def make_sparse_dataset_32():
return CSRDataset32(X_csr32.data, X_csr32.indptr, X_csr32.indices, y32,
sample_weight32, seed=42)
def make_sparse_dataset_64():
return CSRDataset64(X_csr64.data, X_csr64.indptr, X_csr64.indices, y64,
sample_weight64, seed=42)
@pytest.mark.parametrize('dataset_constructor', [
make_dense_dataset_32,
make_dense_dataset_64,
make_sparse_dataset_32,
make_sparse_dataset_64,
])
def test_seq_dataset_basic_iteration(dataset_constructor):
NUMBER_OF_RUNS = 5
dataset = dataset_constructor()
for _ in range(NUMBER_OF_RUNS):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))
assert_csr_equal_values(xi, X_csr64[idx])
assert yi == y64[idx]
assert swi == sample_weight64[idx]
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))
assert_csr_equal_values(xi, X_csr64[idx])
assert yi == y64[idx]
assert swi == sample_weight64[idx]
@pytest.mark.parametrize('make_dense_dataset,make_sparse_dataset', [
(make_dense_dataset_32, make_sparse_dataset_32),
(make_dense_dataset_64, make_sparse_dataset_64),
])
def test_seq_dataset_shuffle(make_dense_dataset, make_sparse_dataset):
dense_dataset, sparse_dataset = make_dense_dataset(), make_sparse_dataset()
# not shuffled
for i in range(5):
_, _, _, idx1 = dense_dataset._next_py()
_, _, _, idx2 = sparse_dataset._next_py()
assert idx1 == i
assert idx2 == i
for i in [132, 50, 9, 18, 58]:
_, _, _, idx1 = dense_dataset._random_py()
_, _, _, idx2 = sparse_dataset._random_py()
assert idx1 == i
assert idx2 == i
seed = 77
dense_dataset._shuffle_py(seed)
sparse_dataset._shuffle_py(seed)
idx_next = [63, 91, 148, 87, 29]
idx_shuffle = [137, 125, 56, 121, 127]
for i, j in zip(idx_next, idx_shuffle):
_, _, _, idx1 = dense_dataset._next_py()
_, _, _, idx2 = sparse_dataset._next_py()
assert idx1 == i
assert idx2 == i
_, _, _, idx1 = dense_dataset._random_py()
_, _, _, idx2 = sparse_dataset._random_py()
assert idx1 == j
assert idx2 == j
@pytest.mark.parametrize('make_dataset_32,make_dataset_64', [
(make_dense_dataset_32, make_dense_dataset_64),
(make_sparse_dataset_32, make_sparse_dataset_64),
])
def test_fused_types_consistency(make_dataset_32, make_dataset_64):
dataset_32, dataset_64 = make_dataset_32(), make_dataset_64()
NUMBER_OF_RUNS = 5
for _ in range(NUMBER_OF_RUNS):
# next sample
(xi_data32, _, _), yi32, _, _ = dataset_32._next_py()
(xi_data64, _, _), yi64, _, _ = dataset_64._next_py()
assert xi_data32.dtype == np.float32
assert xi_data64.dtype == np.float64
assert_allclose(xi_data64, xi_data32, rtol=1e-5)
assert_allclose(yi64, yi32, rtol=1e-5)
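# The CSRDataset constructors above consume the raw .data, .indptr and .indices arrays
# of a scipy CSR matrix. A small standalone illustration of what those three arrays
# look like for a tiny dense matrix:
def _csr_pieces_demo():
    dense = np.array([[1.0, 0.0, 2.0],
                      [0.0, 0.0, 3.0]])
    csr = sp.csr_matrix(dense)
    assert csr.data.tolist() == [1.0, 2.0, 3.0]   # non-zero values, row by row
    assert csr.indices.tolist() == [0, 2, 2]      # column index of each stored value
    assert csr.indptr.tolist() == [0, 2, 3]       # row i spans data[indptr[i]:indptr[i+1]]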
| 31.361702
| 79
| 0.688602
|
import numpy as np
import pytest
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils._seq_dataset import (
ArrayDataset32, ArrayDataset64, CSRDataset32, CSRDataset64)
from sklearn.datasets import load_iris
from sklearn.utils._testing import assert_allclose
iris = load_iris()
X64 = iris.data.astype(np.float64)
y64 = iris.target.astype(np.float64)
X_csr64 = sp.csr_matrix(X64)
sample_weight64 = np.arange(y64.size, dtype=np.float64)
X32 = iris.data.astype(np.float32)
y32 = iris.target.astype(np.float32)
X_csr32 = sp.csr_matrix(X32)
sample_weight32 = np.arange(y32.size, dtype=np.float32)
def assert_csr_equal_values(current, expected):
current.eliminate_zeros()
expected.eliminate_zeros()
expected = expected.astype(current.dtype)
assert current.shape[0] == expected.shape[0]
assert current.shape[1] == expected.shape[1]
assert_array_equal(current.data, expected.data)
assert_array_equal(current.indices, expected.indices)
assert_array_equal(current.indptr, expected.indptr)
def make_dense_dataset_32():
return ArrayDataset32(X32, y32, sample_weight32, seed=42)
def make_dense_dataset_64():
return ArrayDataset64(X64, y64, sample_weight64, seed=42)
def make_sparse_dataset_32():
return CSRDataset32(X_csr32.data, X_csr32.indptr, X_csr32.indices, y32,
sample_weight32, seed=42)
def make_sparse_dataset_64():
return CSRDataset64(X_csr64.data, X_csr64.indptr, X_csr64.indices, y64,
sample_weight64, seed=42)
@pytest.mark.parametrize('dataset_constructor', [
make_dense_dataset_32,
make_dense_dataset_64,
make_sparse_dataset_32,
make_sparse_dataset_64,
])
def test_seq_dataset_basic_iteration(dataset_constructor):
NUMBER_OF_RUNS = 5
dataset = dataset_constructor()
for _ in range(NUMBER_OF_RUNS):
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))
assert_csr_equal_values(xi, X_csr64[idx])
assert yi == y64[idx]
assert swi == sample_weight64[idx]
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))
assert_csr_equal_values(xi, X_csr64[idx])
assert yi == y64[idx]
assert swi == sample_weight64[idx]
@pytest.mark.parametrize('make_dense_dataset,make_sparse_dataset', [
(make_dense_dataset_32, make_sparse_dataset_32),
(make_dense_dataset_64, make_sparse_dataset_64),
])
def test_seq_dataset_shuffle(make_dense_dataset, make_sparse_dataset):
dense_dataset, sparse_dataset = make_dense_dataset(), make_sparse_dataset()
for i in range(5):
_, _, _, idx1 = dense_dataset._next_py()
_, _, _, idx2 = sparse_dataset._next_py()
assert idx1 == i
assert idx2 == i
for i in [132, 50, 9, 18, 58]:
_, _, _, idx1 = dense_dataset._random_py()
_, _, _, idx2 = sparse_dataset._random_py()
assert idx1 == i
assert idx2 == i
seed = 77
dense_dataset._shuffle_py(seed)
sparse_dataset._shuffle_py(seed)
idx_next = [63, 91, 148, 87, 29]
idx_shuffle = [137, 125, 56, 121, 127]
for i, j in zip(idx_next, idx_shuffle):
_, _, _, idx1 = dense_dataset._next_py()
_, _, _, idx2 = sparse_dataset._next_py()
assert idx1 == i
assert idx2 == i
_, _, _, idx1 = dense_dataset._random_py()
_, _, _, idx2 = sparse_dataset._random_py()
assert idx1 == j
assert idx2 == j
@pytest.mark.parametrize('make_dataset_32,make_dataset_64', [
(make_dense_dataset_32, make_dense_dataset_64),
(make_sparse_dataset_32, make_sparse_dataset_64),
])
def test_fused_types_consistency(make_dataset_32, make_dataset_64):
dataset_32, dataset_64 = make_dataset_32(), make_dataset_64()
NUMBER_OF_RUNS = 5
for _ in range(NUMBER_OF_RUNS):
(xi_data32, _, _), yi32, _, _ = dataset_32._next_py()
(xi_data64, _, _), yi64, _, _ = dataset_64._next_py()
assert xi_data32.dtype == np.float32
assert xi_data64.dtype == np.float64
assert_allclose(xi_data64, xi_data32, rtol=1e-5)
assert_allclose(yi64, yi32, rtol=1e-5)
| true
| true
|
f705483d25bdc6d0944fd7a8786d200878fbb456
| 6,799
|
py
|
Python
|
registry.py
|
redheads/registry-image-check
|
989b676b0159607dc51fc7fbc010d56fdd4a197c
|
[
"Apache-2.0"
] | null | null | null |
registry.py
|
redheads/registry-image-check
|
989b676b0159607dc51fc7fbc010d56fdd4a197c
|
[
"Apache-2.0"
] | null | null | null |
registry.py
|
redheads/registry-image-check
|
989b676b0159607dc51fc7fbc010d56fdd4a197c
|
[
"Apache-2.0"
] | 1
|
2021-04-23T13:01:11.000Z
|
2021-04-23T13:01:11.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# bug-report: feilengcui008@gmail.com
""" api for docker registry """
import urllib2
import urllib
import json
import base64
class RegistryException(Exception):
""" registry api related exception """
pass
class RegistryApi(object):
""" interact with docker registry and harbor """
def __init__(self, username, password, registry_endpoint):
self.username = username
self.password = password
self.basic_token = base64.encodestring("%s:%s" % (str(username), str(password)))[0:-1]
self.registry_endpoint = registry_endpoint.rstrip('/')
#print("%s/v2/_catalog" % (self.registry_endpoint,))
auth = self.pingRegistry("%s/v2/_catalog" % (self.registry_endpoint,))
if auth is None:
raise RegistryException("get token realm and service failed")
self.token_endpoint = auth[0]
self.service = auth[1]
def pingRegistry(self, registry_endpoint):
""" ping v2 registry and get realm and service """
headers = dict()
try:
res = urllib2.urlopen(registry_endpoint)
except urllib2.HTTPError as e:
headers = e.hdrs.dict
try:
(realm, service, _) = headers['www-authenticate'].split(',')
return (realm[14:-1:], service[9:-1])
except Exception as e:
return None
def getBearerTokenForScope(self, scope):
""" get bearer token from harbor """
payload = urllib.urlencode({'service': self.service, 'scope': scope})
url = "%s?%s" % (self.token_endpoint, payload)
req = urllib2.Request(url)
req.add_header('Authorization', 'Basic %s' % (self.basic_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())["token"]
except Exception as e:
return None
def getRepositoryList(self, n=None):
""" get repository list """
scope = "registry:catalog:*"
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/_catalog" % (self.registry_endpoint,)
if n is not None:
url = "%s?n=%s" % (url, str(n))
req = urllib2.Request(url)
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def getTagList(self, repository):
""" get tag list for repository """
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/tags/list" % (self.registry_endpoint, repository)
req = urllib2.Request(url)
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def getManifest(self, repository, reference="latest", v1=False):
""" get manifest for tag or digest """
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, reference)
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
if v1:
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v1+json')
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def existManifest(self, repository, reference, v1=False):
""" check to see it manifest exist """
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
raise RegistryException("manifestExist failed due to token error")
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, reference)
req = urllib2.Request(url)
req.get_method = lambda: 'HEAD'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
if v1:
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v1+json')
try:
response = urllib2.urlopen(req)
return (True, response.headers.dict["docker-content-digest"])
except Exception as e:
return (False, None)
def deleteManifest(self, repository, reference):
""" delete manifest by tag """
(is_exist, digest) = self.existManifest(repository, reference)
if not is_exist:
raise RegistryException("manifest not exist")
scope = "repository:%s:pull,push" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
raise RegistryException("delete manifest failed due to token error")
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, digest)
req = urllib2.Request(url)
req.get_method = lambda: 'DELETE'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
urllib2.urlopen(req)
except Exception as e:
return False
return True
def getManifestWithConf(self, repository, reference="latest"):
""" get manifest for tag or digest """
manifest = self.getManifest(repository, reference)
if manifest is None:
raise RegistryException("manifest for %s %s not exist" % (repository, reference))
config_digest = manifest["config"]["digest"]
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/blobs/%s" % (self.registry_endpoint, repository, config_digest)
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
try:
response = urllib2.urlopen(req)
manifest["configContent"] = json.loads(response.read())
return manifest
except Exception as e:
return None
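# pingRegistry above recovers the token realm and service by slicing the raw
# Www-Authenticate header at fixed offsets. A more explicit, order-independent way to
# parse the same header, shown as a standalone sketch (the header value below is a
# made-up sample, not a real endpoint):
def _parse_www_authenticate_demo():
    import re
    header = 'Bearer realm="https://harbor.example.com/service/token",service="harbor-registry"'
    fields = dict(re.findall(r'(\w+)="([^"]*)"', header))
    assert fields['realm'] == 'https://harbor.example.com/service/token'
    assert fields['service'] == 'harbor-registry'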
| 40.712575
| 94
| 0.610531
|
import urllib2
import urllib
import json
import base64
class RegistryException(Exception):
pass
class RegistryApi(object):
def __init__(self, username, password, registry_endpoint):
self.username = username
self.password = password
self.basic_token = base64.encodestring("%s:%s" % (str(username), str(password)))[0:-1]
self.registry_endpoint = registry_endpoint.rstrip('/')
auth = self.pingRegistry("%s/v2/_catalog" % (self.registry_endpoint,))
if auth is None:
raise RegistryException("get token realm and service failed")
self.token_endpoint = auth[0]
self.service = auth[1]
def pingRegistry(self, registry_endpoint):
headers = dict()
try:
res = urllib2.urlopen(registry_endpoint)
except urllib2.HTTPError as e:
headers = e.hdrs.dict
try:
(realm, service, _) = headers['www-authenticate'].split(',')
return (realm[14:-1:], service[9:-1])
except Exception as e:
return None
def getBearerTokenForScope(self, scope):
payload = urllib.urlencode({'service': self.service, 'scope': scope})
url = "%s?%s" % (self.token_endpoint, payload)
req = urllib2.Request(url)
req.add_header('Authorization', 'Basic %s' % (self.basic_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())["token"]
except Exception as e:
return None
def getRepositoryList(self, n=None):
scope = "registry:catalog:*"
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/_catalog" % (self.registry_endpoint,)
if n is not None:
url = "%s?n=%s" % (url, str(n))
req = urllib2.Request(url)
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def getTagList(self, repository):
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/tags/list" % (self.registry_endpoint, repository)
req = urllib2.Request(url)
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def getManifest(self, repository, reference="latest", v1=False):
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, reference)
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
if v1:
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v1+json')
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def existManifest(self, repository, reference, v1=False):
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
raise RegistryException("manifestExist failed due to token error")
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, reference)
req = urllib2.Request(url)
req.get_method = lambda: 'HEAD'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
if v1:
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v1+json')
try:
response = urllib2.urlopen(req)
return (True, response.headers.dict["docker-content-digest"])
except Exception as e:
return (False, None)
def deleteManifest(self, repository, reference):
(is_exist, digest) = self.existManifest(repository, reference)
if not is_exist:
raise RegistryException("manifest not exist")
scope = "repository:%s:pull,push" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
raise RegistryException("delete manifest failed due to token error")
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, digest)
req = urllib2.Request(url)
req.get_method = lambda: 'DELETE'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
urllib2.urlopen(req)
except Exception as e:
return False
return True
def getManifestWithConf(self, repository, reference="latest"):
manifest = self.getManifest(repository, reference)
if manifest is None:
raise RegistryException("manifest for %s %s not exist" % (repository, reference))
config_digest = manifest["config"]["digest"]
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/blobs/%s" % (self.registry_endpoint, repository, config_digest)
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
try:
response = urllib2.urlopen(req)
manifest["configContent"] = json.loads(response.read())
return manifest
except Exception as e:
return None
| true
| true
|
f70548590f17b2348d1c6961c358ea744b865263
| 2,283
|
py
|
Python
|
src/examples/vision/image_classification_camera.py
|
SanchitMisal/aiyprojects-raspbian
|
d148b2b4f427cd6ed240f338f260f277ead50264
|
[
"Apache-2.0"
] | 1,610
|
2017-05-04T13:41:19.000Z
|
2022-03-31T14:55:55.000Z
|
src/examples/vision/image_classification_camera.py
|
SanchitMisal/aiyprojects-raspbian
|
d148b2b4f427cd6ed240f338f260f277ead50264
|
[
"Apache-2.0"
] | 716
|
2017-05-04T13:37:27.000Z
|
2022-03-04T09:42:48.000Z
|
src/examples/vision/image_classification_camera.py
|
SanchitMisal/aiyprojects-raspbian
|
d148b2b4f427cd6ed240f338f260f277ead50264
|
[
"Apache-2.0"
] | 761
|
2017-05-04T16:00:31.000Z
|
2022-03-27T23:18:46.000Z
|
#!/usr/bin/env python3
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Camera image classification demo code.
Runs continuous image classification on camera frames and prints detected object
classes.
Example:
image_classification_camera.py --num_frames 10
"""
import argparse
import contextlib
from aiy.vision.inference import CameraInference
from aiy.vision.models import image_classification
from picamera import PiCamera
def classes_info(classes):
return ', '.join('%s (%.2f)' % pair for pair in classes)
@contextlib.contextmanager
def CameraPreview(camera, enabled):
if enabled:
camera.start_preview()
try:
yield
finally:
if enabled:
camera.stop_preview()
def main():
parser = argparse.ArgumentParser('Image classification camera inference example.')
parser.add_argument('--num_frames', '-n', type=int, default=None,
help='Sets the number of frames to run for, otherwise runs forever.')
parser.add_argument('--num_objects', '-c', type=int, default=3,
help='Sets the number of object inferences to print.')
parser.add_argument('--nopreview', dest='preview', action='store_false', default=True,
help='Disable the camera preview')
args = parser.parse_args()
with PiCamera(sensor_mode=4, framerate=30) as camera, \
CameraPreview(camera, enabled=args.preview), \
CameraInference(image_classification.model()) as inference:
for result in inference.run(args.num_frames):
classes = image_classification.get_classes(result, top_k=args.num_objects)
print(classes_info(classes))
if classes:
camera.annotate_text = '%s (%.2f)' % classes[0]
if __name__ == '__main__':
main()
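# CameraPreview above is an instance of a small reusable pattern: a context manager
# that only performs setup/teardown when a flag is enabled. A camera-free sketch of
# the same idea, reusing the contextlib import already present in this script:
@contextlib.contextmanager
def optional_step(events, enabled):
    if enabled:
        events.append('started')      # stand-in for camera.start_preview()
    try:
        yield
    finally:
        if enabled:
            events.append('stopped')  # stand-in for camera.stop_preview()

def _optional_step_demo():
    events = []
    with optional_step(events, enabled=True):
        events.append('inference')
    assert events == ['started', 'inference', 'stopped']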
| 35.123077
| 90
| 0.710907
|
import argparse
import contextlib
from aiy.vision.inference import CameraInference
from aiy.vision.models import image_classification
from picamera import PiCamera
def classes_info(classes):
return ', '.join('%s (%.2f)' % pair for pair in classes)
@contextlib.contextmanager
def CameraPreview(camera, enabled):
if enabled:
camera.start_preview()
try:
yield
finally:
if enabled:
camera.stop_preview()
def main():
parser = argparse.ArgumentParser('Image classification camera inference example.')
parser.add_argument('--num_frames', '-n', type=int, default=None,
help='Sets the number of frames to run for, otherwise runs forever.')
parser.add_argument('--num_objects', '-c', type=int, default=3,
help='Sets the number of object inferences to print.')
parser.add_argument('--nopreview', dest='preview', action='store_false', default=True,
help='Disable the camera preview')
args = parser.parse_args()
with PiCamera(sensor_mode=4, framerate=30) as camera, \
CameraPreview(camera, enabled=args.preview), \
CameraInference(image_classification.model()) as inference:
for result in inference.run(args.num_frames):
classes = image_classification.get_classes(result, top_k=args.num_objects)
print(classes_info(classes))
if classes:
camera.annotate_text = '%s (%.2f)' % classes[0]
if __name__ == '__main__':
main()
| true
| true
|
f70549113c4375b6a845661be245774cf999e7fa
| 1,084
|
py
|
Python
|
py/server/deephaven/config/__init__.py
|
mattrunyon/deephaven-core
|
80e3567e4647ab76a81e483d0a8ab542f9aadace
|
[
"MIT"
] | null | null | null |
py/server/deephaven/config/__init__.py
|
mattrunyon/deephaven-core
|
80e3567e4647ab76a81e483d0a8ab542f9aadace
|
[
"MIT"
] | null | null | null |
py/server/deephaven/config/__init__.py
|
mattrunyon/deephaven-core
|
80e3567e4647ab76a81e483d0a8ab542f9aadace
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
""" This module provides access to the Deephaven server configuration. """
import jpy
from deephaven import DHError
from deephaven.time import TimeZone
_JDHConfig = jpy.get_type("io.deephaven.configuration.Configuration")
_JDateTimeZone = jpy.get_type("org.joda.time.DateTimeZone")
def get_log_dir() -> str:
""" Returns the server's log directory. """
try:
return _JDHConfig.getInstance().getLogDir()
except Exception as e:
raise DHError(e, "failed to get the server's log directory.") from e
def get_server_timezone() -> TimeZone:
""" Returns the server's time zone. """
try:
j_timezone = _JDateTimeZone.forTimeZone(_JDHConfig.getInstance().getServerTimezone())
for tz in TimeZone:
if j_timezone == tz.value.getTimeZone():
return tz
raise NotImplementedError("can't find the time zone in the TImeZone Enum.")
except Exception as e:
raise DHError(e, message=f"failed to find a recognized time zone") from e
| 33.875
| 93
| 0.694649
|
import jpy
from deephaven import DHError
from deephaven.time import TimeZone
_JDHConfig = jpy.get_type("io.deephaven.configuration.Configuration")
_JDateTimeZone = jpy.get_type("org.joda.time.DateTimeZone")
def get_log_dir() -> str:
try:
return _JDHConfig.getInstance().getLogDir()
except Exception as e:
raise DHError(e, "failed to get the server's log directory.") from e
def get_server_timezone() -> TimeZone:
try:
j_timezone = _JDateTimeZone.forTimeZone(_JDHConfig.getInstance().getServerTimezone())
for tz in TimeZone:
if j_timezone == tz.value.getTimeZone():
return tz
raise NotImplementedError("can't find the time zone in the TImeZone Enum.")
except Exception as e:
raise DHError(e, message=f"failed to find a recognized time zone") from e
| true
| true
|
f70549e63137bea31c0367034d4cf81c7cef6244
| 19,148
|
py
|
Python
|
train/new_model_trainers/img_only.py
|
veritas9872/fastMRI-kspace
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
[
"MIT"
] | 18
|
2019-10-21T23:54:28.000Z
|
2021-12-23T08:16:04.000Z
|
train/new_model_trainers/img_only.py
|
veritas9872/fastMRI-kspace
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
[
"MIT"
] | 1
|
2020-07-11T08:05:33.000Z
|
2020-07-11T08:05:33.000Z
|
train/new_model_trainers/img_only.py
|
veritas9872/fastMRI-kspace
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
[
"MIT"
] | 5
|
2019-11-23T14:11:54.000Z
|
2022-02-19T13:39:15.000Z
|
import torch
from torch import nn, optim, multiprocessing
from torch.utils.data import DataLoader
from torch.utils.tensorboard.writer import SummaryWriter
from tqdm import tqdm
from time import time
from collections import defaultdict
from utils.run_utils import get_logger
from utils.train_utils import CheckpointManager, make_k_grid, make_img_grid, make_rss_slice, standardize_image
from data.data_transforms import complex_abs
from metrics.new_1d_ssim import SSIM
from metrics.custom_losses import psnr, nmse
# Send this somewhere else soon...
def get_class_name(obj):
return 'None' if obj is None else str(obj.__class__).split("'")[1]
class ModelTrainerIMG:
"""
Model trainer for real-valued image domain losses.
    This model trainer can accept k-space and semi-k-space, regardless of weighting.
Both complex and real-valued image domain losses can be calculated.
"""
def __init__(self, args, model, optimizer, train_loader, val_loader, input_train_transform, input_val_transform,
output_train_transform, output_val_transform, losses, scheduler=None):
# Allow multiple processes to access tensors on GPU. Add checking for multiple continuous runs.
if multiprocessing.get_start_method(allow_none=True) is None:
multiprocessing.set_start_method(method='spawn')
self.logger = get_logger(name=__name__, save_file=args.log_path / args.run_name)
# Checking whether inputs are correct.
assert isinstance(model, nn.Module), '`model` must be a Pytorch Module.'
assert isinstance(optimizer, optim.Optimizer), '`optimizer` must be a Pytorch Optimizer.'
assert isinstance(train_loader, DataLoader) and isinstance(val_loader, DataLoader), \
'`train_loader` and `val_loader` must be Pytorch DataLoader objects.'
assert callable(input_train_transform) and callable(input_val_transform), \
'input_transforms must be callable functions.'
# I think this would be best practice.
assert isinstance(output_train_transform, nn.Module) and isinstance(output_val_transform, nn.Module), \
'`output_train_transform` and `output_val_transform` must be Pytorch Modules.'
# 'losses' is expected to be a dictionary.
# Even composite losses should be a single loss module with a tuple as its output.
losses = nn.ModuleDict(losses)
if scheduler is not None:
if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
self.metric_scheduler = True
elif isinstance(scheduler, optim.lr_scheduler._LRScheduler):
self.metric_scheduler = False
else:
raise TypeError('`scheduler` must be a Pytorch Learning Rate Scheduler.')
# Display interval of 0 means no display of validation images on TensorBoard.
if args.max_images <= 0:
self.display_interval = 0
else:
self.display_interval = int(len(val_loader.dataset) // (args.max_images * args.batch_size))
self.manager = CheckpointManager(model, optimizer, mode='min', save_best_only=args.save_best_only,
ckpt_dir=args.ckpt_path, max_to_keep=args.max_to_keep)
# loading from checkpoint if specified.
if vars(args).get('prev_model_ckpt'):
self.manager.load(load_dir=args.prev_model_ckpt, load_optimizer=False)
self.model = model
self.optimizer = optimizer
self.train_loader = train_loader
self.val_loader = val_loader
self.input_train_transform = input_train_transform
self.input_val_transform = input_val_transform
self.output_train_transform = output_train_transform
self.output_val_transform = output_val_transform
self.losses = losses
self.scheduler = scheduler
self.writer = SummaryWriter(str(args.log_path))
self.verbose = args.verbose
self.num_epochs = args.num_epochs
self.smoothing_factor = args.smoothing_factor
self.shrink_scale = args.shrink_scale
self.use_slice_metrics = args.use_slice_metrics
# This part should get SSIM, not 1 - SSIM.
self.ssim = SSIM(filter_size=7).to(device=args.device) # Needed to cache the kernel.
# Logging all components of the Model Trainer.
# Train and Val input and output transforms are assumed to use the same input transform class.
self.logger.info(f'''
Summary of Model Trainer Components:
Model: {get_class_name(model)}.
Optimizer: {get_class_name(optimizer)}.
Input Transforms: {get_class_name(input_val_transform)}.
Output Transform: {get_class_name(output_val_transform)}.
Image Domain Loss: {get_class_name(losses['img_loss'])}.
Learning-Rate Scheduler: {get_class_name(scheduler)}.
''') # This part has parts different for IMG and CMG losses!!
def train_model(self):
tic_tic = time()
self.logger.info('Beginning Training Loop.')
for epoch in range(1, self.num_epochs + 1): # 1 based indexing of epochs.
tic = time() # Training
train_epoch_loss, train_epoch_metrics = self._train_epoch(epoch=epoch)
toc = int(time() - tic)
self._log_epoch_outputs(epoch, train_epoch_loss, train_epoch_metrics, elapsed_secs=toc, training=True)
tic = time() # Validation
val_epoch_loss, val_epoch_metrics = self._val_epoch(epoch=epoch)
toc = int(time() - tic)
self._log_epoch_outputs(epoch, val_epoch_loss, val_epoch_metrics, elapsed_secs=toc, training=False)
self.manager.save(metric=val_epoch_loss, verbose=True)
if self.scheduler is not None:
if self.metric_scheduler: # If the scheduler is a metric based scheduler, include metrics.
self.scheduler.step(metrics=val_epoch_loss)
else:
self.scheduler.step()
self.writer.close() # Flushes remaining data to TensorBoard.
toc_toc = int(time() - tic_tic)
self.logger.info(f'Finishing Training Loop. Total elapsed time: '
f'{toc_toc // 3600} hr {(toc_toc // 60) % 60} min {toc_toc % 60} sec.')
def _train_epoch(self, epoch):
self.model.train()
torch.autograd.set_grad_enabled(True)
epoch_loss = list() # Appending values to list due to numerical underflow and NaN values.
epoch_metrics = defaultdict(list)
data_loader = enumerate(self.train_loader, start=1)
if not self.verbose: # tqdm has to be on the outermost iterator to function properly.
data_loader = tqdm(data_loader, total=len(self.train_loader.dataset)) # Should divide by batch size.
for step, data in data_loader:
# Data pre-processing is expected to have gradient calculations removed inside already.
inputs, targets, extra_params = self.input_train_transform(*data)
# 'recons' is a dictionary containing k-space, complex image, and real image reconstructions.
recons, step_loss, step_metrics = self._train_step(inputs, targets, extra_params)
epoch_loss.append(step_loss.detach()) # Perhaps not elegant, but underflow makes this necessary.
# Gradients are not calculated so as to boost speed and remove weird errors.
with torch.no_grad(): # Update epoch loss and metrics
if self.use_slice_metrics:
slice_metrics = self._get_slice_metrics(recons, targets, extra_params)
step_metrics.update(slice_metrics)
[epoch_metrics[key].append(value.detach()) for key, value in step_metrics.items()]
if self.verbose:
self._log_step_outputs(epoch, step, step_loss, step_metrics, training=True)
# Converted to scalar and dict with scalar values respectively.
return self._get_epoch_outputs(epoch, epoch_loss, epoch_metrics, training=True)
def _train_step(self, inputs, targets, extra_params):
self.optimizer.zero_grad()
outputs = self.model(inputs)
recons = self.output_train_transform(outputs, targets, extra_params)
step_loss, step_metrics = self._step(recons, targets, extra_params)
step_loss.backward()
self.optimizer.step()
return recons, step_loss, step_metrics
def _val_epoch(self, epoch):
self.model.eval()
torch.autograd.set_grad_enabled(False)
epoch_loss = list()
epoch_metrics = defaultdict(list)
# 1 based indexing for steps.
data_loader = enumerate(self.val_loader, start=1)
if not self.verbose:
data_loader = tqdm(data_loader, total=len(self.val_loader.dataset))
for step, data in data_loader:
inputs, targets, extra_params = self.input_val_transform(*data)
recons, step_loss, step_metrics = self._val_step(inputs, targets, extra_params)
epoch_loss.append(step_loss.detach())
if self.use_slice_metrics:
slice_metrics = self._get_slice_metrics(recons, targets, extra_params)
step_metrics.update(slice_metrics)
[epoch_metrics[key].append(value.detach()) for key, value in step_metrics.items()]
if self.verbose:
self._log_step_outputs(epoch, step, step_loss, step_metrics, training=False)
# Visualize images on TensorBoard.
self._visualize_images(recons, targets, extra_params, epoch, step, training=False)
# Converted to scalar and dict with scalar values respectively.
return self._get_epoch_outputs(epoch, epoch_loss, epoch_metrics, training=False)
def _val_step(self, inputs, targets, extra_params):
outputs = self.model(inputs)
recons = self.output_val_transform(outputs, targets, extra_params)
step_loss, step_metrics = self._step(recons, targets, extra_params)
return recons, step_loss, step_metrics
def _step(self, recons, targets, extra_params):
step_loss = self.losses['img_loss'](recons['img_recons'], targets['img_targets'])
# If img_loss is a tuple, it is expected to contain all its component losses as a dict in its second element.
step_metrics = dict()
if isinstance(step_loss, tuple):
step_loss, step_metrics = step_loss
acc = extra_params["acceleration"]
if step_metrics: # This has to be checked before anything is added to step_metrics.
for key, value in step_metrics.items():
step_metrics[f'acc_{acc}_{key}'] = value
step_metrics[f'acc_{acc}_loss'] = step_loss
return step_loss, step_metrics
def _visualize_images(self, recons, targets, extra_params, epoch, step, training=False):
mode = 'Training' if training else 'Validation'
# This numbering scheme seems to have issues for certain numbers.
# Please check cases when there is no remainder.
if self.display_interval and (step % self.display_interval == 0):
img_recon_grid = make_img_grid(recons['img_recons'], self.shrink_scale)
# The delta image is obtained by subtracting at the complex image, not the real valued image.
delta_image = complex_abs(targets['cmg_targets'] - recons['cmg_recons'])
delta_img_grid = make_img_grid(delta_image, self.shrink_scale)
acc = extra_params['acceleration']
kwargs = dict(global_step=epoch, dataformats='HW')
self.writer.add_image(f'{mode} Image Recons/{acc}/{step}', img_recon_grid, **kwargs)
self.writer.add_image(f'{mode} Delta Image/{acc}/{step}', delta_img_grid, **kwargs)
if 'kspace_recons' in recons:
kspace_recon_grid = make_k_grid(recons['kspace_recons'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} k-space Recons/{acc}/{step}', kspace_recon_grid, **kwargs)
# Adding RSS images of reconstructions and targets.
if 'rss_recons' in recons:
recon_rss = standardize_image(recons['rss_recons'])
delta_rss = standardize_image(make_rss_slice(delta_image))
self.writer.add_image(f'{mode} RSS Recons/{acc}/{step}', recon_rss, **kwargs)
self.writer.add_image(f'{mode} RSS Delta/{acc}/{step}', delta_rss, **kwargs)
if 'semi_kspace_recons' in recons:
semi_kspace_recon_grid = make_k_grid(
recons['semi_kspace_recons'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} semi-k-space Recons/{acc}/{step}', semi_kspace_recon_grid, **kwargs)
if epoch == 1: # Maybe add input images too later on.
img_target_grid = make_img_grid(targets['img_targets'], self.shrink_scale)
self.writer.add_image(f'{mode} Image Targets/{acc}/{step}', img_target_grid, **kwargs)
if 'kspace_targets' in targets:
kspace_target_grid = \
make_k_grid(targets['kspace_targets'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} k-space Targets/{acc}/{step}', kspace_target_grid, **kwargs)
if 'img_inputs' in targets:
# Not actually the input but what the input looks like as an image.
img_grid = make_img_grid(targets['img_inputs'], self.shrink_scale)
self.writer.add_image(f'{mode} Inputs as Images/{acc}/{step}', img_grid, **kwargs)
if 'rss_targets' in targets:
target_rss = standardize_image(targets['rss_targets'])
self.writer.add_image(f'{mode} RSS Targets/{acc}/{step}', target_rss, **kwargs)
if 'semi_kspace_targets' in targets:
semi_kspace_target_grid = make_k_grid(
targets['semi_kspace_targets'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} semi-k-space Targets/{acc}/{step}',
semi_kspace_target_grid, **kwargs)
def _get_slice_metrics(self, recons, targets, extra_params):
img_recons = recons['img_recons'].detach() # Just in case.
img_targets = targets['img_targets'].detach()
max_range = img_targets.max() - img_targets.min()
slice_ssim = self.ssim(img_recons, img_targets)
slice_psnr = psnr(img_recons, img_targets, data_range=max_range)
slice_nmse = nmse(img_recons, img_targets)
slice_metrics = {'slice/ssim': slice_ssim, 'slice/nmse': slice_nmse, 'slice/psnr': slice_psnr}
if 'rss_recons' in recons:
rss_recons = recons['rss_recons'].detach()
rss_targets = targets['rss_targets'].detach()
max_range = rss_targets.max() - rss_targets.min()
rss_ssim = self.ssim(rss_recons, rss_targets)
rss_psnr = psnr(rss_recons, rss_targets, data_range=max_range)
rss_nmse = nmse(rss_recons, rss_targets)
slice_metrics['rss/ssim'] = rss_ssim
slice_metrics['rss/psnr'] = rss_psnr
slice_metrics['rss/nmse'] = rss_nmse
else:
rss_ssim = rss_psnr = rss_nmse = 0
# Additional metrics for separating between acceleration factors.
if 'acceleration' in extra_params:
acc = extra_params["acceleration"]
slice_metrics[f'slice_acc_{acc}/ssim'] = slice_ssim
slice_metrics[f'slice_acc_{acc}/psnr'] = slice_psnr
slice_metrics[f'slice_acc_{acc}/nmse'] = slice_nmse
if 'rss_recons' in recons:
slice_metrics[f'rss_acc_{acc}/ssim'] = rss_ssim
slice_metrics[f'rss_acc_{acc}/psnr'] = rss_psnr
slice_metrics[f'rss_acc_{acc}/nmse'] = rss_nmse
return slice_metrics
def _get_epoch_outputs(self, epoch, epoch_loss, epoch_metrics, training=True):
mode = 'Training' if training else 'Validation'
num_slices = len(self.train_loader.dataset) if training else len(self.val_loader.dataset)
# Checking for nan values.
epoch_loss = torch.stack(epoch_loss)
is_finite = torch.isfinite(epoch_loss)
num_nans = (is_finite.size(0) - is_finite.sum()).item()
if num_nans > 0:
self.logger.warning(f'Epoch {epoch} {mode}: {num_nans} NaN values present in {num_slices} slices.'
                                f' Turning on anomaly detection.')
# Turn on anomaly detection for finding where the nan values are.
torch.autograd.set_detect_anomaly(True)
epoch_loss = torch.mean(epoch_loss[is_finite]).item()
else:
epoch_loss = torch.mean(epoch_loss).item()
for key, value in epoch_metrics.items():
epoch_metric = torch.stack(value)
is_finite = torch.isfinite(epoch_metric)
num_nans = (is_finite.size(0) - is_finite.sum()).item()
if num_nans > 0:
self.logger.warning(f'Epoch {epoch} {mode} {key}: {num_nans} NaN values present in {num_slices} slices.'
                                    f' Turning on anomaly detection.')
epoch_metrics[key] = torch.mean(epoch_metric[is_finite]).item()
else:
epoch_metrics[key] = torch.mean(epoch_metric).item()
return epoch_loss, epoch_metrics
def _log_step_outputs(self, epoch, step, step_loss, step_metrics, training=True):
mode = 'Training' if training else 'Validation'
self.logger.info(f'Epoch {epoch:03d} Step {step:03d} {mode} loss: {step_loss.item():.4e}')
for key, value in step_metrics.items():
self.logger.info(f'Epoch {epoch:03d} Step {step:03d}: {mode} {key}: {value.item():.4e}')
def _log_epoch_outputs(self, epoch, epoch_loss, epoch_metrics, elapsed_secs, training=True):
mode = 'Training' if training else 'Validation'
self.logger.info(f'Epoch {epoch:03d} {mode}. loss: {epoch_loss:.4e}, '
f'Time: {elapsed_secs // 60} min {elapsed_secs % 60} sec')
self.writer.add_scalar(f'{mode} epoch_loss', scalar_value=epoch_loss, global_step=epoch)
for key, value in epoch_metrics.items():
self.logger.info(f'Epoch {epoch:03d} {mode}. {key}: {value:.4e}')
# Very important whether it is mode_~~ or mode/~~.
if 'loss' in key:
self.writer.add_scalar(f'{mode}/epoch_{key}', scalar_value=value, global_step=epoch)
else:
self.writer.add_scalar(f'{mode}_epoch_{key}', scalar_value=value, global_step=epoch)
if not training: # Record learning rate.
for idx, group in enumerate(self.optimizer.param_groups, start=1):
self.writer.add_scalar(f'learning_rate_{idx}', group['lr'], global_step=epoch)
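# --- Illustrative sketch (added; not part of the original trainer) ---
# The trainer expects losses['img_loss'] to return either a scalar loss or a
# (total_loss, {name: component}) tuple, which _step() unpacks into per-component
# metrics. A hypothetical composite loss following that convention could look like
# this; the component choices and weights are assumptions, not original code.
class CompositeImageLoss(nn.Module):
    def __init__(self, l1_weight=1.0, mse_weight=1.0):
        super().__init__()
        self.l1 = nn.L1Loss()
        self.mse = nn.MSELoss()
        self.l1_weight = l1_weight
        self.mse_weight = mse_weight

    def forward(self, recons, targets):
        l1_loss = self.l1(recons, targets)
        mse_loss = self.mse(recons, targets)
        total = self.l1_weight * l1_loss + self.mse_weight * mse_loss
        # The second element is the component dict that _step() records as metrics.
        return total, {'l1': l1_loss, 'mse': mse_loss}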
| 49.606218
| 120
| 0.652966
|
import torch
from torch import nn, optim, multiprocessing
from torch.utils.data import DataLoader
from torch.utils.tensorboard.writer import SummaryWriter
from tqdm import tqdm
from time import time
from collections import defaultdict
from utils.run_utils import get_logger
from utils.train_utils import CheckpointManager, make_k_grid, make_img_grid, make_rss_slice, standardize_image
from data.data_transforms import complex_abs
from metrics.new_1d_ssim import SSIM
from metrics.custom_losses import psnr, nmse
def get_class_name(obj):
return 'None' if obj is None else str(obj.__class__).split("'")[1]
class ModelTrainerIMG:
def __init__(self, args, model, optimizer, train_loader, val_loader, input_train_transform, input_val_transform,
output_train_transform, output_val_transform, losses, scheduler=None):
# Allow multiple processes to access tensors on GPU. Add checking for multiple continuous runs.
if multiprocessing.get_start_method(allow_none=True) is None:
multiprocessing.set_start_method(method='spawn')
self.logger = get_logger(name=__name__, save_file=args.log_path / args.run_name)
# Checking whether inputs are correct.
assert isinstance(model, nn.Module), '`model` must be a Pytorch Module.'
assert isinstance(optimizer, optim.Optimizer), '`optimizer` must be a Pytorch Optimizer.'
assert isinstance(train_loader, DataLoader) and isinstance(val_loader, DataLoader), \
'`train_loader` and `val_loader` must be Pytorch DataLoader objects.'
assert callable(input_train_transform) and callable(input_val_transform), \
'input_transforms must be callable functions.'
# I think this would be best practice.
assert isinstance(output_train_transform, nn.Module) and isinstance(output_val_transform, nn.Module), \
'`output_train_transform` and `output_val_transform` must be Pytorch Modules.'
# 'losses' is expected to be a dictionary.
# Even composite losses should be a single loss module with a tuple as its output.
losses = nn.ModuleDict(losses)
if scheduler is not None:
if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
self.metric_scheduler = True
elif isinstance(scheduler, optim.lr_scheduler._LRScheduler):
self.metric_scheduler = False
else:
raise TypeError('`scheduler` must be a Pytorch Learning Rate Scheduler.')
# Display interval of 0 means no display of validation images on TensorBoard.
if args.max_images <= 0:
self.display_interval = 0
else:
self.display_interval = int(len(val_loader.dataset) // (args.max_images * args.batch_size))
self.manager = CheckpointManager(model, optimizer, mode='min', save_best_only=args.save_best_only,
ckpt_dir=args.ckpt_path, max_to_keep=args.max_to_keep)
# loading from checkpoint if specified.
if vars(args).get('prev_model_ckpt'):
self.manager.load(load_dir=args.prev_model_ckpt, load_optimizer=False)
self.model = model
self.optimizer = optimizer
self.train_loader = train_loader
self.val_loader = val_loader
self.input_train_transform = input_train_transform
self.input_val_transform = input_val_transform
self.output_train_transform = output_train_transform
self.output_val_transform = output_val_transform
self.losses = losses
self.scheduler = scheduler
self.writer = SummaryWriter(str(args.log_path))
self.verbose = args.verbose
self.num_epochs = args.num_epochs
self.smoothing_factor = args.smoothing_factor
self.shrink_scale = args.shrink_scale
self.use_slice_metrics = args.use_slice_metrics
# This part should get SSIM, not 1 - SSIM.
self.ssim = SSIM(filter_size=7).to(device=args.device) # Needed to cache the kernel.
# Logging all components of the Model Trainer.
# Train and Val input and output transforms are assumed to use the same input transform class.
self.logger.info(f'''
Summary of Model Trainer Components:
Model: {get_class_name(model)}.
Optimizer: {get_class_name(optimizer)}.
Input Transforms: {get_class_name(input_val_transform)}.
Output Transform: {get_class_name(output_val_transform)}.
Image Domain Loss: {get_class_name(losses['img_loss'])}.
Learning-Rate Scheduler: {get_class_name(scheduler)}.
''') # This part has parts different for IMG and CMG losses!!
def train_model(self):
tic_tic = time()
self.logger.info('Beginning Training Loop.')
for epoch in range(1, self.num_epochs + 1): # 1 based indexing of epochs.
tic = time() # Training
train_epoch_loss, train_epoch_metrics = self._train_epoch(epoch=epoch)
toc = int(time() - tic)
self._log_epoch_outputs(epoch, train_epoch_loss, train_epoch_metrics, elapsed_secs=toc, training=True)
tic = time() # Validation
val_epoch_loss, val_epoch_metrics = self._val_epoch(epoch=epoch)
toc = int(time() - tic)
self._log_epoch_outputs(epoch, val_epoch_loss, val_epoch_metrics, elapsed_secs=toc, training=False)
self.manager.save(metric=val_epoch_loss, verbose=True)
if self.scheduler is not None:
if self.metric_scheduler: # If the scheduler is a metric based scheduler, include metrics.
self.scheduler.step(metrics=val_epoch_loss)
else:
self.scheduler.step()
self.writer.close() # Flushes remaining data to TensorBoard.
toc_toc = int(time() - tic_tic)
self.logger.info(f'Finishing Training Loop. Total elapsed time: '
f'{toc_toc // 3600} hr {(toc_toc // 60) % 60} min {toc_toc % 60} sec.')
def _train_epoch(self, epoch):
self.model.train()
torch.autograd.set_grad_enabled(True)
epoch_loss = list() # Appending values to list due to numerical underflow and NaN values.
epoch_metrics = defaultdict(list)
data_loader = enumerate(self.train_loader, start=1)
if not self.verbose: # tqdm has to be on the outermost iterator to function properly.
data_loader = tqdm(data_loader, total=len(self.train_loader.dataset)) # Should divide by batch size.
for step, data in data_loader:
# Data pre-processing is expected to have gradient calculations removed inside already.
inputs, targets, extra_params = self.input_train_transform(*data)
# 'recons' is a dictionary containing k-space, complex image, and real image reconstructions.
recons, step_loss, step_metrics = self._train_step(inputs, targets, extra_params)
epoch_loss.append(step_loss.detach()) # Perhaps not elegant, but underflow makes this necessary.
# Gradients are not calculated so as to boost speed and remove weird errors.
with torch.no_grad(): # Update epoch loss and metrics
if self.use_slice_metrics:
slice_metrics = self._get_slice_metrics(recons, targets, extra_params)
step_metrics.update(slice_metrics)
[epoch_metrics[key].append(value.detach()) for key, value in step_metrics.items()]
if self.verbose:
self._log_step_outputs(epoch, step, step_loss, step_metrics, training=True)
# Converted to scalar and dict with scalar values respectively.
return self._get_epoch_outputs(epoch, epoch_loss, epoch_metrics, training=True)
def _train_step(self, inputs, targets, extra_params):
self.optimizer.zero_grad()
outputs = self.model(inputs)
recons = self.output_train_transform(outputs, targets, extra_params)
step_loss, step_metrics = self._step(recons, targets, extra_params)
step_loss.backward()
self.optimizer.step()
return recons, step_loss, step_metrics
def _val_epoch(self, epoch):
self.model.eval()
torch.autograd.set_grad_enabled(False)
epoch_loss = list()
epoch_metrics = defaultdict(list)
# 1 based indexing for steps.
data_loader = enumerate(self.val_loader, start=1)
if not self.verbose:
data_loader = tqdm(data_loader, total=len(self.val_loader.dataset))
for step, data in data_loader:
inputs, targets, extra_params = self.input_val_transform(*data)
recons, step_loss, step_metrics = self._val_step(inputs, targets, extra_params)
epoch_loss.append(step_loss.detach())
if self.use_slice_metrics:
slice_metrics = self._get_slice_metrics(recons, targets, extra_params)
step_metrics.update(slice_metrics)
[epoch_metrics[key].append(value.detach()) for key, value in step_metrics.items()]
if self.verbose:
self._log_step_outputs(epoch, step, step_loss, step_metrics, training=False)
# Visualize images on TensorBoard.
self._visualize_images(recons, targets, extra_params, epoch, step, training=False)
# Converted to scalar and dict with scalar values respectively.
return self._get_epoch_outputs(epoch, epoch_loss, epoch_metrics, training=False)
def _val_step(self, inputs, targets, extra_params):
outputs = self.model(inputs)
recons = self.output_val_transform(outputs, targets, extra_params)
step_loss, step_metrics = self._step(recons, targets, extra_params)
return recons, step_loss, step_metrics
def _step(self, recons, targets, extra_params):
step_loss = self.losses['img_loss'](recons['img_recons'], targets['img_targets'])
# If img_loss is a tuple, it is expected to contain all its component losses as a dict in its second element.
step_metrics = dict()
if isinstance(step_loss, tuple):
step_loss, step_metrics = step_loss
acc = extra_params["acceleration"]
if step_metrics: # This has to be checked before anything is added to step_metrics.
for key, value in step_metrics.items():
step_metrics[f'acc_{acc}_{key}'] = value
step_metrics[f'acc_{acc}_loss'] = step_loss
return step_loss, step_metrics
def _visualize_images(self, recons, targets, extra_params, epoch, step, training=False):
mode = 'Training' if training else 'Validation'
# This numbering scheme seems to have issues for certain numbers.
# Please check cases when there is no remainder.
if self.display_interval and (step % self.display_interval == 0):
img_recon_grid = make_img_grid(recons['img_recons'], self.shrink_scale)
# The delta image is obtained by subtracting at the complex image, not the real valued image.
delta_image = complex_abs(targets['cmg_targets'] - recons['cmg_recons'])
delta_img_grid = make_img_grid(delta_image, self.shrink_scale)
acc = extra_params['acceleration']
kwargs = dict(global_step=epoch, dataformats='HW')
self.writer.add_image(f'{mode} Image Recons/{acc}/{step}', img_recon_grid, **kwargs)
self.writer.add_image(f'{mode} Delta Image/{acc}/{step}', delta_img_grid, **kwargs)
if 'kspace_recons' in recons:
kspace_recon_grid = make_k_grid(recons['kspace_recons'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} k-space Recons/{acc}/{step}', kspace_recon_grid, **kwargs)
# Adding RSS images of reconstructions and targets.
if 'rss_recons' in recons:
recon_rss = standardize_image(recons['rss_recons'])
delta_rss = standardize_image(make_rss_slice(delta_image))
self.writer.add_image(f'{mode} RSS Recons/{acc}/{step}', recon_rss, **kwargs)
self.writer.add_image(f'{mode} RSS Delta/{acc}/{step}', delta_rss, **kwargs)
if 'semi_kspace_recons' in recons:
semi_kspace_recon_grid = make_k_grid(
recons['semi_kspace_recons'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} semi-k-space Recons/{acc}/{step}', semi_kspace_recon_grid, **kwargs)
if epoch == 1: # Maybe add input images too later on.
img_target_grid = make_img_grid(targets['img_targets'], self.shrink_scale)
self.writer.add_image(f'{mode} Image Targets/{acc}/{step}', img_target_grid, **kwargs)
if 'kspace_targets' in targets:
kspace_target_grid = \
make_k_grid(targets['kspace_targets'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} k-space Targets/{acc}/{step}', kspace_target_grid, **kwargs)
if 'img_inputs' in targets:
# Not actually the input but what the input looks like as an image.
img_grid = make_img_grid(targets['img_inputs'], self.shrink_scale)
self.writer.add_image(f'{mode} Inputs as Images/{acc}/{step}', img_grid, **kwargs)
if 'rss_targets' in targets:
target_rss = standardize_image(targets['rss_targets'])
self.writer.add_image(f'{mode} RSS Targets/{acc}/{step}', target_rss, **kwargs)
if 'semi_kspace_targets' in targets:
semi_kspace_target_grid = make_k_grid(
targets['semi_kspace_targets'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} semi-k-space Targets/{acc}/{step}',
semi_kspace_target_grid, **kwargs)
def _get_slice_metrics(self, recons, targets, extra_params):
img_recons = recons['img_recons'].detach() # Just in case.
img_targets = targets['img_targets'].detach()
max_range = img_targets.max() - img_targets.min()
slice_ssim = self.ssim(img_recons, img_targets)
slice_psnr = psnr(img_recons, img_targets, data_range=max_range)
slice_nmse = nmse(img_recons, img_targets)
slice_metrics = {'slice/ssim': slice_ssim, 'slice/nmse': slice_nmse, 'slice/psnr': slice_psnr}
if 'rss_recons' in recons:
rss_recons = recons['rss_recons'].detach()
rss_targets = targets['rss_targets'].detach()
max_range = rss_targets.max() - rss_targets.min()
rss_ssim = self.ssim(rss_recons, rss_targets)
rss_psnr = psnr(rss_recons, rss_targets, data_range=max_range)
rss_nmse = nmse(rss_recons, rss_targets)
slice_metrics['rss/ssim'] = rss_ssim
slice_metrics['rss/psnr'] = rss_psnr
slice_metrics['rss/nmse'] = rss_nmse
else:
rss_ssim = rss_psnr = rss_nmse = 0
# Additional metrics for separating between acceleration factors.
if 'acceleration' in extra_params:
acc = extra_params["acceleration"]
slice_metrics[f'slice_acc_{acc}/ssim'] = slice_ssim
slice_metrics[f'slice_acc_{acc}/psnr'] = slice_psnr
slice_metrics[f'slice_acc_{acc}/nmse'] = slice_nmse
if 'rss_recons' in recons:
slice_metrics[f'rss_acc_{acc}/ssim'] = rss_ssim
slice_metrics[f'rss_acc_{acc}/psnr'] = rss_psnr
slice_metrics[f'rss_acc_{acc}/nmse'] = rss_nmse
return slice_metrics
def _get_epoch_outputs(self, epoch, epoch_loss, epoch_metrics, training=True):
mode = 'Training' if training else 'Validation'
num_slices = len(self.train_loader.dataset) if training else len(self.val_loader.dataset)
# Checking for nan values.
epoch_loss = torch.stack(epoch_loss)
is_finite = torch.isfinite(epoch_loss)
num_nans = (is_finite.size(0) - is_finite.sum()).item()
if num_nans > 0:
self.logger.warning(f'Epoch {epoch} {mode}: {num_nans} NaN values present in {num_slices} slices.'
                                f' Turning on anomaly detection.')
# Turn on anomaly detection for finding where the nan values are.
torch.autograd.set_detect_anomaly(True)
epoch_loss = torch.mean(epoch_loss[is_finite]).item()
else:
epoch_loss = torch.mean(epoch_loss).item()
for key, value in epoch_metrics.items():
epoch_metric = torch.stack(value)
is_finite = torch.isfinite(epoch_metric)
num_nans = (is_finite.size(0) - is_finite.sum()).item()
if num_nans > 0:
self.logger.warning(f'Epoch {epoch} {mode} {key}: {num_nans} NaN values present in {num_slices} slices.'
                                    f' Turning on anomaly detection.')
epoch_metrics[key] = torch.mean(epoch_metric[is_finite]).item()
else:
epoch_metrics[key] = torch.mean(epoch_metric).item()
return epoch_loss, epoch_metrics
def _log_step_outputs(self, epoch, step, step_loss, step_metrics, training=True):
mode = 'Training' if training else 'Validation'
self.logger.info(f'Epoch {epoch:03d} Step {step:03d} {mode} loss: {step_loss.item():.4e}')
for key, value in step_metrics.items():
self.logger.info(f'Epoch {epoch:03d} Step {step:03d}: {mode} {key}: {value.item():.4e}')
def _log_epoch_outputs(self, epoch, epoch_loss, epoch_metrics, elapsed_secs, training=True):
mode = 'Training' if training else 'Validation'
self.logger.info(f'Epoch {epoch:03d} {mode}. loss: {epoch_loss:.4e}, '
f'Time: {elapsed_secs // 60} min {elapsed_secs % 60} sec')
self.writer.add_scalar(f'{mode} epoch_loss', scalar_value=epoch_loss, global_step=epoch)
for key, value in epoch_metrics.items():
self.logger.info(f'Epoch {epoch:03d} {mode}. {key}: {value:.4e}')
# Very important whether it is mode_~~ or mode/~~.
if 'loss' in key:
self.writer.add_scalar(f'{mode}/epoch_{key}', scalar_value=value, global_step=epoch)
else:
self.writer.add_scalar(f'{mode}_epoch_{key}', scalar_value=value, global_step=epoch)
if not training: # Record learning rate.
for idx, group in enumerate(self.optimizer.param_groups, start=1):
self.writer.add_scalar(f'learning_rate_{idx}', group['lr'], global_step=epoch)
| true
| true
|
f7054b7e595f0681a798e1fedadc7b43405ebf05
| 2,576
|
py
|
Python
|
interpreter.py
|
bdngo/math-interpreter-py
|
fadcefce82176adf38722f7005270d6f2ea6957d
|
[
"MIT"
] | null | null | null |
interpreter.py
|
bdngo/math-interpreter-py
|
fadcefce82176adf38722f7005270d6f2ea6957d
|
[
"MIT"
] | null | null | null |
interpreter.py
|
bdngo/math-interpreter-py
|
fadcefce82176adf38722f7005270d6f2ea6957d
|
[
"MIT"
] | null | null | null |
from nodes import *
from tokens import Token, TokenType
class Interpreter:
def __init__(self, ast):
self.ast = ast
def eval(self):
return self.evalHelper(self.ast)
def evalHelper(self, ast):
if isinstance(ast, NumberNode):
return ast.node
elif isinstance(ast, AddNode):
return self.evalHelper(ast.node_a) + self.evalHelper(ast.node_b)
elif isinstance(ast, SubtractNode):
return self.evalHelper(ast.node_a) - self.evalHelper(ast.node_b)
elif isinstance(ast, MultiplyNode):
return self.evalHelper(ast.node_a) * self.evalHelper(ast.node_b)
elif isinstance(ast, DivideNode):
eval_b = self.evalHelper(ast.node_b)
if eval_b == 0:
raise ZeroDivisionError("Cannot divide by zero")
return self.evalHelper(ast.node_a) / eval_b
elif isinstance(ast, ModuloNode):
eval_b = self.evalHelper(ast.node_b)
if eval_b == 0:
raise ZeroDivisionError("Cannot divide by zero")
return self.evalHelper(ast.node_a) % eval_b
elif isinstance(ast, PowerNode):
return self.evalHelper(ast.node_a) ** self.evalHelper(ast.node_b)
elif isinstance(ast, PositiveNode):
return self.evalHelper(ast.node)
elif isinstance(ast, NegativeNode):
return -self.evalHelper(ast.node)
def postfix_eval(tokens):
stack = []
for t in tokens:
if t.type == TokenType.PLUS:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, a + b))
elif t.type == TokenType.MINUS:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b - a))
elif t.type == TokenType.MULTIPLY:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, a * b))
elif t.type == TokenType.DIVIDE:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b / a))
elif t.type == TokenType.MODULO:
print(stack)
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b % a))
elif t.type == TokenType.POWER:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b ** a))
else:
stack.append(t)
return stack[0].value
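# --- Illustrative usage sketch (added; not part of the original file) ---
# postfix_eval consumes tokens in reverse Polish notation. For example,
# "3 4 + 2 *" corresponds to NUMBER(3) NUMBER(4) PLUS NUMBER(2) MULTIPLY and
# evaluates to (3 + 4) * 2 = 14. Building operator tokens as Token(<type>, None)
# is an assumption about the Token constructor; only Token(TokenType.NUMBER, value)
# appears in the original code.
if __name__ == "__main__":
    rpn = [
        Token(TokenType.NUMBER, 3),
        Token(TokenType.NUMBER, 4),
        Token(TokenType.PLUS, None),
        Token(TokenType.NUMBER, 2),
        Token(TokenType.MULTIPLY, None),
    ]
    print(postfix_eval(rpn))  # expected output: 14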
| 37.333333
| 77
| 0.574534
|
from nodes import *
from tokens import Token, TokenType
class Interpreter:
def __init__(self, ast):
self.ast = ast
def eval(self):
return self.evalHelper(self.ast)
def evalHelper(self, ast):
if isinstance(ast, NumberNode):
return ast.node
elif isinstance(ast, AddNode):
return self.evalHelper(ast.node_a) + self.evalHelper(ast.node_b)
elif isinstance(ast, SubtractNode):
return self.evalHelper(ast.node_a) - self.evalHelper(ast.node_b)
elif isinstance(ast, MultiplyNode):
return self.evalHelper(ast.node_a) * self.evalHelper(ast.node_b)
elif isinstance(ast, DivideNode):
eval_b = self.evalHelper(ast.node_b)
if eval_b == 0:
raise ZeroDivisionError("Cannot divide by zero")
return self.evalHelper(ast.node_a) / eval_b
elif isinstance(ast, ModuloNode):
eval_b = self.evalHelper(ast.node_b)
if eval_b == 0:
raise ZeroDivisionError("Cannot divide by zero")
return self.evalHelper(ast.node_a) % eval_b
elif isinstance(ast, PowerNode):
return self.evalHelper(ast.node_a) ** self.evalHelper(ast.node_b)
elif isinstance(ast, PositiveNode):
return self.evalHelper(ast.node)
elif isinstance(ast, NegativeNode):
return -self.evalHelper(ast.node)
def postfix_eval(tokens):
stack = []
for t in tokens:
if t.type == TokenType.PLUS:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, a + b))
elif t.type == TokenType.MINUS:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b - a))
elif t.type == TokenType.MULTIPLY:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, a * b))
elif t.type == TokenType.DIVIDE:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b / a))
elif t.type == TokenType.MODULO:
print(stack)
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b % a))
elif t.type == TokenType.POWER:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b ** a))
else:
stack.append(t)
return stack[0].value
| true
| true
|
f7054bb2d5a141238c03843e494f10f7a429bc1b
| 3,634
|
py
|
Python
|
upload/common/batch.py
|
sampierson/upload-service
|
b7c470706f729bdee34a4254555f798558877095
|
[
"MIT"
] | 6
|
2018-01-31T19:44:17.000Z
|
2020-02-20T13:03:09.000Z
|
upload/common/batch.py
|
sampierson/upload-service
|
b7c470706f729bdee34a4254555f798558877095
|
[
"MIT"
] | 379
|
2018-03-21T21:29:15.000Z
|
2020-01-28T14:20:48.000Z
|
upload/common/batch.py
|
HumanCellAtlas/staging-service
|
b7c470706f729bdee34a4254555f798558877095
|
[
"MIT"
] | 5
|
2018-03-09T14:13:15.000Z
|
2020-01-30T15:49:46.000Z
|
import hashlib
import json
import os
import boto3
from .retry import retry_on_aws_too_many_requests
batch = boto3.client('batch')
class JobDefinition:
@classmethod
def clear_all(cls):
deleted_count = 0
for jobdef in batch.describe_job_definitions(status='ACTIVE')['jobDefinitions']:
cls(metadata=jobdef).delete()
deleted_count += 1
return deleted_count
def __init__(self, docker_image=None, deployment=None, arn=None, metadata=None):
self.deployment = deployment if deployment else os.environ['DEPLOYMENT_STAGE']
if not docker_image and not metadata:
raise RuntimeError("you must provide docker_image or metadata")
self.metadata = metadata
self.docker_image = docker_image if docker_image else metadata['containerProperties']['image']
self.name = self._job_definition_name() if docker_image else metadata['jobDefinitionName']
if not arn:
if metadata:
self.arn = metadata['jobDefinitionArn']
print(f"Job definition {self.name} for {self.docker_image}:")
def find_or_create(self, job_role_arn):
if self.load():
print(f"\tfound {self.arn}")
else:
self.create(job_role_arn)
return self
def load(self):
jobdefs = self._describe_job_definitions(jobDefinitionName=self.name, status='ACTIVE')['jobDefinitions']
if len(jobdefs) > 0:
self.metadata = jobdefs[0]
self.arn = self.metadata['jobDefinitionArn']
return self
else:
return None
@retry_on_aws_too_many_requests
def create(self, job_role_arn):
self.metadata = batch.register_job_definition(
jobDefinitionName=self.name,
type='container',
parameters={},
containerProperties={
'image': self.docker_image,
'vcpus': 4,
'memory': 15000,
'command': [],
'jobRoleArn': job_role_arn,
'volumes': [
{
'host': {'sourcePath': '/data'},
'name': 'data'
},
],
'mountPoints': [
{
'containerPath': '/data',
'readOnly': False,
'sourceVolume': 'data'
},
]
},
retryStrategy={
'attempts': 3
}
)
self.arn = self.metadata['jobDefinitionArn']
print(f"\tcreated {self.arn}")
print(json.dumps(self.metadata, indent=4))
def delete(self):
print(f"Deleting job definition {self.name} ({self.docker_image})")
batch.deregister_job_definition(jobDefinition=self.arn)
@retry_on_aws_too_many_requests
def _describe_job_definitions(self, *args, **kwargs):
return batch.describe_job_definitions(*args, **kwargs)
def _job_definition_name(self):
"""
We create Job Definitions for each unique docker image we are given.
As there is no way to search for job definitions wih a particular Docker image,
we must put the Docker image name in the job definition name (the only thing we can search on).
We hash the image name as it will contain characters that aren't allowed in a job definition name.
"""
hasher = hashlib.sha1()
hasher.update(bytes(self.docker_image, 'utf8'))
return f"upload-{self.deployment}-{hasher.hexdigest()}"
| 35.627451
| 112
| 0.581728
|
import hashlib
import json
import os
import boto3
from .retry import retry_on_aws_too_many_requests
batch = boto3.client('batch')
class JobDefinition:
@classmethod
def clear_all(cls):
deleted_count = 0
for jobdef in batch.describe_job_definitions(status='ACTIVE')['jobDefinitions']:
cls(metadata=jobdef).delete()
deleted_count += 1
return deleted_count
def __init__(self, docker_image=None, deployment=None, arn=None, metadata=None):
self.deployment = deployment if deployment else os.environ['DEPLOYMENT_STAGE']
if not docker_image and not metadata:
raise RuntimeError("you must provide docker_image or metadata")
self.metadata = metadata
self.docker_image = docker_image if docker_image else metadata['containerProperties']['image']
self.name = self._job_definition_name() if docker_image else metadata['jobDefinitionName']
if not arn:
if metadata:
self.arn = metadata['jobDefinitionArn']
print(f"Job definition {self.name} for {self.docker_image}:")
def find_or_create(self, job_role_arn):
if self.load():
print(f"\tfound {self.arn}")
else:
self.create(job_role_arn)
return self
def load(self):
jobdefs = self._describe_job_definitions(jobDefinitionName=self.name, status='ACTIVE')['jobDefinitions']
if len(jobdefs) > 0:
self.metadata = jobdefs[0]
self.arn = self.metadata['jobDefinitionArn']
return self
else:
return None
@retry_on_aws_too_many_requests
def create(self, job_role_arn):
self.metadata = batch.register_job_definition(
jobDefinitionName=self.name,
type='container',
parameters={},
containerProperties={
'image': self.docker_image,
'vcpus': 4,
'memory': 15000,
'command': [],
'jobRoleArn': job_role_arn,
'volumes': [
{
'host': {'sourcePath': '/data'},
'name': 'data'
},
],
'mountPoints': [
{
'containerPath': '/data',
'readOnly': False,
'sourceVolume': 'data'
},
]
},
retryStrategy={
'attempts': 3
}
)
self.arn = self.metadata['jobDefinitionArn']
print(f"\tcreated {self.arn}")
print(json.dumps(self.metadata, indent=4))
def delete(self):
print(f"Deleting job definition {self.name} ({self.docker_image})")
batch.deregister_job_definition(jobDefinition=self.arn)
@retry_on_aws_too_many_requests
def _describe_job_definitions(self, *args, **kwargs):
return batch.describe_job_definitions(*args, **kwargs)
def _job_definition_name(self):
hasher = hashlib.sha1()
hasher.update(bytes(self.docker_image, 'utf8'))
return f"upload-{self.deployment}-{hasher.hexdigest()}"
| true
| true
|
f7054c9560ffb857b2249248031a041e8e79b6b8
| 143
|
py
|
Python
|
02-Data_Types_and_Variables/Exercises/4-Sum_of_Chars.py
|
eclipse-ib/Software-University-Fundamentals_Module
|
994ef75c70d1bae8e615dbb789aeffd6e0a42c34
|
[
"MIT"
] | null | null | null |
02-Data_Types_and_Variables/Exercises/4-Sum_of_Chars.py
|
eclipse-ib/Software-University-Fundamentals_Module
|
994ef75c70d1bae8e615dbb789aeffd6e0a42c34
|
[
"MIT"
] | null | null | null |
02-Data_Types_and_Variables/Exercises/4-Sum_of_Chars.py
|
eclipse-ib/Software-University-Fundamentals_Module
|
994ef75c70d1bae8e615dbb789aeffd6e0a42c34
|
[
"MIT"
] | null | null | null |
n = int(input())
total_sum = 0
for i in range(1,n+1):
letter = input()
total_sum += ord(letter)
print(f"The sum equals: {total_sum}")
| 17.875
| 37
| 0.622378
|
n = int(input())
total_sum = 0
for i in range(1,n+1):
letter = input()
total_sum += ord(letter)
print(f"The sum equals: {total_sum}")
| true
| true
|
f7054ddd1c29f5074a0fb83fe4035a51a2b1d6f7
| 24,926
|
py
|
Python
|
python/ccxt/wazirx.py
|
jspenc72/ccxt
|
5eb43754ddb85aa24fb16860ce80d18790c288be
|
[
"MIT"
] | null | null | null |
python/ccxt/wazirx.py
|
jspenc72/ccxt
|
5eb43754ddb85aa24fb16860ce80d18790c288be
|
[
"MIT"
] | 1
|
2022-01-27T19:54:13.000Z
|
2022-01-27T19:54:13.000Z
|
python/ccxt/wazirx.py
|
jspenc72/ccxt
|
5eb43754ddb85aa24fb16860ce80d18790c288be
|
[
"MIT"
] | 1
|
2022-03-15T22:51:08.000Z
|
2022-03-15T22:51:08.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.precise import Precise
class wazirx(Exchange):
def describe(self):
return self.deep_extend(super(wazirx, self).describe(), {
'id': 'wazirx',
'name': 'WazirX',
'countries': ['IN'],
'version': 'v2',
'rateLimit': 100,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchCurrencies': False,
'fetchBalance': True,
'fetchBidsAsks': False,
'fetchClosedOrders': False,
'fetchDepositAddress': False,
'fetchDeposits': True,
'fetchFundingFees': False,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRates': False,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOrderBook': True,
'fetchPositions': False,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactions': False,
'fetchWithdrawals': False,
'setLeverage': False,
'withdraw': False,
'fetchDepositAddressesByNetwork': False,
'transfer': False,
'fetchTransfers': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/148647666-c109c20b-f8ac-472f-91c3-5f658cb90f49.jpeg',
'api': 'https://api.wazirx.com/sapi/v1',
'www': 'https://wazirx.com',
'doc': 'https://docs.wazirx.com/#public-rest-api-for-wazirx',
'fees': 'https://wazirx.com/fees',
},
'api': {
'public': {
'get': {
'exchangeInfo': 1,
'depth': 1,
'ping': 1,
'systemStatus': 1,
'tickers/24hr': 1,
'ticker/24hr': 1,
'time': 1,
'trades': 1,
},
},
'private': {
'get': {
'account': 1,
'allOrders': 1,
'funds': 1,
'historicalTrades': 1,
'openOrders': 1,
'order': 1,
},
'post': {
'order': 1,
'order/test': 1,
},
'delete': {
'order': 1,
'openOrders': 1,
},
},
},
'fees': {
'WRX': {'maker': self.parse_number('0.0'), 'taker': self.parse_number('0.0')},
},
'exceptions': {
'exact': {
'-1121': BadSymbol, # {"code": -1121, "message": "Invalid symbol."}
'1999': BadRequest, # {"code":1999,"message":"symbol is missing, symbol does not have a valid value"} message varies depending on the error
'2002': InsufficientFunds, # {"code":2002,"message":"Not enough USDT balance to execute self order"}
'2005': BadRequest, # {"code":2005,"message":"Signature is incorrect."}
'2078': PermissionDenied, # {"code":2078,"message":"Permission denied."}
'2098': BadRequest, # {"code":2098,"message":"Request out of receiving window."}
'2031': InvalidOrder, # {"code":2031,"message":"Minimum buy amount must be worth 2.0 USDT"}
'2113': BadRequest, # {"code":2113,"message":"RecvWindow must be in range 1..60000"}
'2115': BadRequest, # {"code":2115,"message":"Signature not found."}
'2136': RateLimitExceeded, # {"code":2136,"message":"Too many api request"}
'94001': InvalidOrder, # {"code":94001,"message":"Stop price not found."}
},
},
'options': {
# 'fetchTradesMethod': 'privateGetHistoricalTrades',
'recvWindow': 10000,
},
})
def fetch_markets(self, params={}):
response = self.publicGetExchangeInfo(params)
#
# {
# "timezone":"UTC",
# "serverTime":1641336850932,
# "symbols":[
# {
# "symbol":"btcinr",
# "status":"trading",
# "baseAsset":"btc",
# "quoteAsset":"inr",
# "baseAssetPrecision":5,
# "quoteAssetPrecision":0,
# "orderTypes":[
# "limit",
# "stop_limit"
# ],
# "isSpotTradingAllowed":true,
# "filters":[
# {
# "filterType":"PRICE_FILTER",
# "minPrice":"1",
# "tickSize":"1"
# }
# ]
# },
#
markets = self.safe_value(response, 'symbols', [])
result = []
for i in range(0, len(markets)):
entry = markets[i]
id = self.safe_string(entry, 'symbol')
baseId = self.safe_string(entry, 'baseAsset')
quoteId = self.safe_string(entry, 'quoteAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
isSpot = self.safe_value(entry, 'isSpotTradingAllowed')
filters = self.safe_value(entry, 'filters')
minPrice = None
for j in range(0, len(filters)):
filter = filters[j]
filterType = self.safe_string(filter, 'filterType')
if filterType == 'PRICE_FILTER':
minPrice = self.safe_number(filter, 'minPrice')
fee = self.safe_value(self.fees, quote, {})
takerString = self.safe_string(fee, 'taker', '0.2')
takerString = Precise.string_div(takerString, '100')
taker = self.parse_number(takerString)
makerString = self.safe_string(fee, 'maker', '0.2')
makerString = Precise.string_div(makerString, '100')
maker = self.parse_number(makerString)
status = self.safe_string(entry, 'status')
active = status == 'trading'
limits = {
'price': {
'min': minPrice,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
precision = {
'price': self.safe_integer(entry, 'quoteAssetPrecision'),
'amount': self.safe_integer(entry, 'baseAssetPrecision'),
}
result.append({
'info': entry,
'symbol': symbol,
'id': id,
'base': base,
'quote': quote,
'baseId': baseId,
'maker': maker,
'taker': taker,
'quoteId': quoteId,
'limits': limits,
'precision': precision,
'type': 'spot',
'spot': isSpot,
'active': active,
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # [1, 5, 10, 20, 50, 100, 500, 1000]
response = self.publicGetDepth(self.extend(request, params))
#
# {
# "timestamp":1559561187,
# "asks":[
# ["8540.0","1.5"],
# ["8541.0","0.0042"]
# ],
# "bids":[
# ["8530.0","0.8814"],
# ["8524.0","1.4"]
# ]
# }
#
timestamp = self.safe_integer(response, 'timestamp')
return self.parse_order_book(response, symbol, timestamp)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
ticker = self.publicGetTicker24hr(self.extend(request, params))
#
# {
# "symbol":"wrxinr",
# "baseAsset":"wrx",
# "quoteAsset":"inr",
# "openPrice":"94.77",
# "lowPrice":"92.7",
# "highPrice":"95.17",
# "lastPrice":"94.03",
# "volume":"1118700.0",
# "bidPrice":"94.02",
# "askPrice":"94.03",
# "at":1641382455000
# }
#
return self.parse_ticker(ticker, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTickers24hr()
#
# [
# {
# "symbol":"btcinr",
# "baseAsset":"btc",
# "quoteAsset":"inr",
# "openPrice":"3698486",
# "lowPrice":"3641155.0",
# "highPrice":"3767999.0",
# "lastPrice":"3713212.0",
# "volume":"254.11582",
# "bidPrice":"3715021.0",
# "askPrice":"3715022.0",
# }
# ...
# ]
#
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
parsedTicker = self.parse_ticker(ticker)
symbol = parsedTicker['symbol']
result[symbol] = parsedTicker
return result
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # Default 500; max 1000.
method = self.safe_string(self.options, 'fetchTradesMethod', 'publicGetTrades')
response = getattr(self, method)(self.extend(request, params))
# [
# {
# "id":322307791,
# "price":"93.7",
# "qty":"0.7",
# "quoteQty":"65.59",
# "time":1641386701000,
# "isBuyerMaker":false
# },
# ]
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# {
# "id":322307791,
# "price":"93.7",
# "qty":"0.7",
# "quoteQty":"65.59",
# "time":1641386701000,
# "isBuyerMaker":false
# }
#
id = self.safe_string(trade, 'id')
        timestamp = self.safe_integer(trade, 'time')
datetime = self.iso8601(timestamp)
symbol = None
if market is not None:
symbol = market['symbol']
isBuyerMaker = self.safe_value(trade, 'isBuyerMaker')
side = 'sell' if isBuyerMaker else 'buy'
price = self.safe_number(trade, 'price')
amount = self.safe_number(trade, 'qty')
cost = self.safe_number(trade, 'quoteQty')
return self.safe_trade({
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': datetime,
'symbol': symbol,
'order': id,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
})
def fetch_status(self, params={}):
response = self.publicGetSystemStatus(params)
#
# {"status":"normal","message":"System is running normally."}
#
status = self.safe_string(response, 'status')
status = 'ok' if (status == 'normal') else 'maintenance'
self.status = self.extend(self.status, {
'status': status,
'updated': self.milliseconds(),
})
return self.status
def fetch_time(self, params={}):
response = self.publicGetTime(params)
#
# {
# "serverTime":1635467280514
# }
#
return self.safe_integer(response, 'serverTime')
def parse_ticker(self, ticker, market=None):
#
# {
# "symbol":"btcinr",
# "baseAsset":"btc",
# "quoteAsset":"inr",
# "openPrice":"3698486",
# "lowPrice":"3641155.0",
# "highPrice":"3767999.0",
# "lastPrice":"3713212.0",
# "volume":"254.11582", # base volume
# "bidPrice":"3715021.0",
# "askPrice":"3715022.0",
# "at":1641382455000 # only on fetchTicker
# }
#
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
last = self.safe_number(ticker, 'lastPrice')
open = self.safe_number(ticker, 'openPrice')
high = self.safe_number(ticker, 'highPrice')
low = self.safe_number(ticker, 'lowPrice')
baseVolume = self.safe_number(ticker, 'volume')
bid = self.safe_number(ticker, 'bidPrice')
ask = self.safe_number(ticker, 'askPrice')
        timestamp = self.safe_integer(ticker, 'at')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': high,
'low': low,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': None,
'info': ticker,
}, market)
def parse_balance(self, response):
result = {}
for i in range(0, len(response)):
balance = response[i]
id = self.safe_string(balance, 'asset')
code = self.safe_currency_code(id)
account = self.account()
account['free'] = self.safe_string(balance, 'free')
account['used'] = self.safe_string(balance, 'locked')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetFunds(params)
#
# [
# {
# "asset":"inr",
# "free":"0.0",
# "locked":"0.0"
# },
# ]
#
return self.parse_balance(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = self.privateGetAllOrders(self.extend(request, params))
# [
# {
# "id": 28,
# "symbol": "wrxinr",
# "price": "9293.0",
# "origQty": "10.0",
# "executedQty": "8.2",
# "status": "cancel",
# "type": "limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1499827319559
# },
# {
# "id": 30,
# "symbol": "wrxinr",
# "price": "9293.0",
# "stopPrice": "9200.0",
# "origQty": "10.0",
# "executedQty": "0.0",
# "status": "cancel",
# "type": "stop_limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1507725176595
# }
# ]
orders = self.parse_orders(response, market, since, limit)
orders = self.filter_by(orders, 'symbol', symbol)
return orders
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privateGetOpenOrders(self.extend(request, params))
# [
# {
# "id": 28,
# "symbol": "wrxinr",
# "price": "9293.0",
# "origQty": "10.0",
# "executedQty": "8.2",
# "status": "cancel",
# "type": "limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1499827319559
# },
# {
# "id": 30,
# "symbol": "wrxinr",
# "price": "9293.0",
# "stopPrice": "9200.0",
# "origQty": "10.0",
# "executedQty": "0.0",
# "status": "cancel",
# "type": "stop_limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1507725176595
# }
# ]
orders = self.parse_orders(response, market, since, limit)
return orders
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
return self.privateDeleteOpenOrders(self.extend(request, params))
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'orderId': id,
}
response = self.privateDeleteOrder(self.extend(request, params))
return self.parse_order(response)
def create_order(self, symbol, type, side, amount, price=None, params={}):
        if (type != 'limit') and (type != 'stop_limit'):
            raise ExchangeError(self.id + ' createOrder() supports limit and stop_limit orders only')
if price is None:
raise ExchangeError(self.id + ' createOrder() requires a price argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'side': side,
'quantity': amount,
'type': 'limit',
}
request['price'] = self.price_to_precision(symbol, price)
stopPrice = self.safe_string(params, 'stopPrice')
if stopPrice is not None:
request['type'] = 'stop_limit'
response = self.privatePostOrder(self.extend(request, params))
# {
# "id": 28,
# "symbol": "wrxinr",
# "price": "9293.0",
# "origQty": "10.0",
# "executedQty": "8.2",
# "status": "wait",
# "type": "limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1499827319559
# }
return self.parse_order(response, market)
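    # Illustrative call (symbol and prices are hypothetical): passing a
    # 'stopPrice' in params switches the request type from 'limit' to
    # 'stop_limit', e.g.
    #   exchange.create_order('WRX/INR', 'limit', 'sell', 10, 9293.0, {'stopPrice': 9200.0})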
def parse_order(self, order, market=None):
# {
# "id":1949417813,
# "symbol":"ltcusdt",
# "type":"limit",
# "side":"sell",
# "status":"done",
# "price":"146.2",
# "origQty":"0.05",
# "executedQty":"0.05",
# "createdTime":1641252564000,
# "updatedTime":1641252564000
# },
created = self.safe_integer(order, 'createdTime')
updated = self.safe_integer(order, 'updatedTime')
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
amount = self.safe_string(order, 'quantity')
filled = self.safe_string(order, 'executedQty')
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
price = self.safe_string(order, 'price')
type = self.safe_string_lower(order, 'type')
side = self.safe_string_lower(order, 'side')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': created,
'datetime': self.iso8601(created),
'lastTradeTimestamp': updated,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'amount': amount,
'filled': filled,
'remaining': None,
'cost': None,
'fee': None,
'average': None,
'trades': [],
}, market)
def parse_order_status(self, status):
statuses = {
'wait': 'open',
'done': 'closed',
'cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
if api == 'private':
self.check_required_credentials()
timestamp = self.milliseconds()
data = self.extend({'recvWindow': self.options['recvWindow'], 'timestamp': timestamp}, params)
data = self.keysort(data)
signature = self.hmac(self.encode(self.urlencode(data)), self.encode(self.secret), hashlib.sha256)
url += '?' + self.urlencode(data)
url += '&signature=' + signature
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'X-Api-Key': self.apiKey,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
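    # Rough illustration of the signing scheme above (all values hypothetical):
    # the key-sorted query string, e.g.
    #   'recvWindow=10000&symbol=wrxinr&timestamp=1641252564000'
    # is HMAC-SHA256 signed with the API secret, appended as '&signature=...',
    # and the API key is sent in the 'X-Api-Key' header.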
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
#
# {"code":2098,"message":"Request out of receiving window."}
#
if response is None:
return
errorCode = self.safe_string(response, 'code')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback)
| 36.872781
| 160
| 0.46618
|
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.precise import Precise
class wazirx(Exchange):
def describe(self):
return self.deep_extend(super(wazirx, self).describe(), {
'id': 'wazirx',
'name': 'WazirX',
'countries': ['IN'],
'version': 'v2',
'rateLimit': 100,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchCurrencies': False,
'fetchBalance': True,
'fetchBidsAsks': False,
'fetchClosedOrders': False,
'fetchDepositAddress': False,
'fetchDeposits': True,
'fetchFundingFees': False,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRates': False,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOrderBook': True,
'fetchPositions': False,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactions': False,
'fetchWithdrawals': False,
'setLeverage': False,
'withdraw': False,
'fetchDepositAddressesByNetwork': False,
'transfer': False,
'fetchTransfers': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/148647666-c109c20b-f8ac-472f-91c3-5f658cb90f49.jpeg',
'api': 'https://api.wazirx.com/sapi/v1',
'www': 'https://wazirx.com',
'doc': 'https://docs.wazirx.com/#public-rest-api-for-wazirx',
'fees': 'https://wazirx.com/fees',
},
'api': {
'public': {
'get': {
'exchangeInfo': 1,
'depth': 1,
'ping': 1,
'systemStatus': 1,
'tickers/24hr': 1,
'ticker/24hr': 1,
'time': 1,
'trades': 1,
},
},
'private': {
'get': {
'account': 1,
'allOrders': 1,
'funds': 1,
'historicalTrades': 1,
'openOrders': 1,
'order': 1,
},
'post': {
'order': 1,
'order/test': 1,
},
'delete': {
'order': 1,
'openOrders': 1,
},
},
},
'fees': {
'WRX': {'maker': self.parse_number('0.0'), 'taker': self.parse_number('0.0')},
},
'exceptions': {
'exact': {
'-1121': BadSymbol,
'1999': BadRequest,
'2002': InsufficientFunds,
'2005': BadRequest,
'2078': PermissionDenied,
'2098': BadRequest,
'2031': InvalidOrder,
'2113': BadRequest,
'2115': BadRequest,
'2136': RateLimitExceeded,
'94001': InvalidOrder,
},
},
'options': {
'recvWindow': 10000,
},
})
def fetch_markets(self, params={}):
response = self.publicGetExchangeInfo(params)
markets = self.safe_value(response, 'symbols', [])
result = []
for i in range(0, len(markets)):
entry = markets[i]
id = self.safe_string(entry, 'symbol')
baseId = self.safe_string(entry, 'baseAsset')
quoteId = self.safe_string(entry, 'quoteAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
isSpot = self.safe_value(entry, 'isSpotTradingAllowed')
filters = self.safe_value(entry, 'filters')
minPrice = None
for j in range(0, len(filters)):
filter = filters[j]
filterType = self.safe_string(filter, 'filterType')
if filterType == 'PRICE_FILTER':
minPrice = self.safe_number(filter, 'minPrice')
fee = self.safe_value(self.fees, quote, {})
takerString = self.safe_string(fee, 'taker', '0.2')
takerString = Precise.string_div(takerString, '100')
taker = self.parse_number(takerString)
makerString = self.safe_string(fee, 'maker', '0.2')
makerString = Precise.string_div(makerString, '100')
maker = self.parse_number(makerString)
status = self.safe_string(entry, 'status')
active = status == 'trading'
limits = {
'price': {
'min': minPrice,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
precision = {
'price': self.safe_integer(entry, 'quoteAssetPrecision'),
'amount': self.safe_integer(entry, 'baseAssetPrecision'),
}
result.append({
'info': entry,
'symbol': symbol,
'id': id,
'base': base,
'quote': quote,
'baseId': baseId,
'maker': maker,
'taker': taker,
'quoteId': quoteId,
'limits': limits,
'precision': precision,
'type': 'spot',
'spot': isSpot,
'active': active,
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit
response = self.publicGetDepth(self.extend(request, params))
timestamp = self.safe_integer(response, 'timestamp')
return self.parse_order_book(response, symbol, timestamp)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
ticker = self.publicGetTicker24hr(self.extend(request, params))
return self.parse_ticker(ticker, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTickers24hr()
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
parsedTicker = self.parse_ticker(ticker)
symbol = parsedTicker['symbol']
result[symbol] = parsedTicker
return result
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit
method = self.safe_string(self.options, 'fetchTradesMethod', 'publicGetTrades')
response = getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
id = self.safe_string(trade, 'id')
timestamp = self.parse8601(self.safe_string(trade, 'time'))
datetime = self.iso8601(timestamp)
symbol = None
if market is not None:
symbol = market['symbol']
isBuyerMaker = self.safe_value(trade, 'isBuyerMaker')
side = 'sell' if isBuyerMaker else 'buy'
price = self.safe_number(trade, 'price')
amount = self.safe_number(trade, 'qty')
cost = self.safe_number(trade, 'quoteQty')
return self.safe_trade({
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': datetime,
'symbol': symbol,
'order': id,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
})
def fetch_status(self, params={}):
response = self.publicGetSystemStatus(params)
status = self.safe_string(response, 'status')
status = 'ok' if (status == 'normal') else 'maintenance'
self.status = self.extend(self.status, {
'status': status,
'updated': self.milliseconds(),
})
return self.status
def fetch_time(self, params={}):
response = self.publicGetTime(params)
return self.safe_integer(response, 'serverTime')
def parse_ticker(self, ticker, market=None):
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
last = self.safe_number(ticker, 'lastPrice')
open = self.safe_number(ticker, 'openPrice')
high = self.safe_number(ticker, 'highPrice')
low = self.safe_number(ticker, 'lowPrice')
baseVolume = self.safe_number(ticker, 'volume')
bid = self.safe_number(ticker, 'bidPrice')
ask = self.safe_number(ticker, 'askPrice')
timestamp = self.safe_string(ticker, 'at')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': high,
'low': low,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': None,
'info': ticker,
}, market)
def parse_balance(self, response):
result = {}
for i in range(0, len(response)):
balance = response[i]
id = self.safe_string(balance, 'asset')
code = self.safe_currency_code(id)
account = self.account()
account['free'] = self.safe_string(balance, 'free')
account['used'] = self.safe_string(balance, 'locked')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetFunds(params)
return self.parse_balance(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = self.privateGetAllOrders(self.extend(request, params))
orders = self.parse_orders(response, market, since, limit)
orders = self.filter_by(orders, 'symbol', symbol)
return orders
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privateGetOpenOrders(self.extend(request, params))
orders = self.parse_orders(response, market, since, limit)
return orders
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
return self.privateDeleteOpenOrders(self.extend(request, params))
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'orderId': id,
}
response = self.privateDeleteOrder(self.extend(request, params))
return self.parse_order(response)
def create_order(self, symbol, type, side, amount, price=None, params={}):
        if (type != 'limit') and (type != 'stop_limit'):
            raise ExchangeError(self.id + ' createOrder() supports limit and stop_limit orders only')
if price is None:
raise ExchangeError(self.id + ' createOrder() requires a price argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'side': side,
'quantity': amount,
'type': 'limit',
}
request['price'] = self.price_to_precision(symbol, price)
stopPrice = self.safe_string(params, 'stopPrice')
if stopPrice is not None:
request['type'] = 'stop_limit'
response = self.privatePostOrder(self.extend(request, params))
return self.parse_order(response, market)
def parse_order(self, order, market=None):
created = self.safe_integer(order, 'createdTime')
updated = self.safe_integer(order, 'updatedTime')
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
amount = self.safe_string(order, 'quantity')
filled = self.safe_string(order, 'executedQty')
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
price = self.safe_string(order, 'price')
type = self.safe_string_lower(order, 'type')
side = self.safe_string_lower(order, 'side')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': created,
'datetime': self.iso8601(created),
'lastTradeTimestamp': updated,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'amount': amount,
'filled': filled,
'remaining': None,
'cost': None,
'fee': None,
'average': None,
'trades': [],
}, market)
def parse_order_status(self, status):
statuses = {
'wait': 'open',
'done': 'closed',
'cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
if api == 'private':
self.check_required_credentials()
timestamp = self.milliseconds()
data = self.extend({'recvWindow': self.options['recvWindow'], 'timestamp': timestamp}, params)
data = self.keysort(data)
signature = self.hmac(self.encode(self.urlencode(data)), self.encode(self.secret), hashlib.sha256)
url += '?' + self.urlencode(data)
url += '&signature=' + signature
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'X-Api-Key': self.apiKey,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
errorCode = self.safe_string(response, 'code')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback)
| true
| true
|
f7054e9b703e0a50932a707ef947c70e5aed0d9c
| 1,295
|
py
|
Python
|
bin/MemoryRuns.py
|
seanluciotolentino/SimpactPurple
|
a81a738bd63bc1d6a86f7243c0826f6e5d846447
|
[
"AFL-3.0"
] | null | null | null |
bin/MemoryRuns.py
|
seanluciotolentino/SimpactPurple
|
a81a738bd63bc1d6a86f7243c0826f6e5d846447
|
[
"AFL-3.0"
] | null | null | null |
bin/MemoryRuns.py
|
seanluciotolentino/SimpactPurple
|
a81a738bd63bc1d6a86f7243c0826f6e5d846447
|
[
"AFL-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 14:16:27 2013
@author: Lucio
Program for assessing the memory footprint of the simulation. Needs the
memory_profiler module (installed on the milano cluster).
"""
import os
import time
import sys
import simpactpurple
from memory_profiler import profile
@profile
def run_single(pop):
s = simpactpurple.Community()
s.INITIAL_POPULATION = pop
#Simulate a run of the simulation
s.start() # initialize data structures
#a few timesteps
s.update_recruiting(s.RECRUIT_INITIAL)
for i in range(s.RECRUIT_WARM_UP):
s.time = i
s.time_operator.step() # 1. Time progresses
s.relationship_operator.step() # 2. Form and dissolve relationships
s.infection_operator.step() # 3. HIV transmission
s.update_recruiting(s.RECRUIT_RATE)
for i in range(s.RECRUIT_WARM_UP, int(s.NUMBER_OF_YEARS*52)):
s.time = i
s.time_operator.step() # 1. Time progresses
s.relationship_operator.step() # 2. Form and dissolve relationships
s.infection_operator.step() # 3. HIV transmission
#post-process / clean-up
for pipe in s.pipes.values():
pipe.send("terminate")
if __name__ == '__main__':
run_single(int(sys.argv[1]))
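# Example invocation (the population size below is arbitrary): pass the initial
# population as the only command line argument, e.g.
#   python MemoryRuns.py 5000
# The @profile decorator should then print line-by-line memory usage for run_single.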
| 27.553191
| 76
| 0.674131
|
import os
import time
import sys
import simpactpurple
from memory_profiler import profile
@profile
def run_single(pop):
s = simpactpurple.Community()
s.INITIAL_POPULATION = pop
s.start()
s.update_recruiting(s.RECRUIT_INITIAL)
for i in range(s.RECRUIT_WARM_UP):
s.time = i
s.time_operator.step()
s.relationship_operator.step()
s.infection_operator.step()
s.update_recruiting(s.RECRUIT_RATE)
for i in range(s.RECRUIT_WARM_UP, int(s.NUMBER_OF_YEARS*52)):
s.time = i
s.time_operator.step()
s.relationship_operator.step()
s.infection_operator.step()
for pipe in s.pipes.values():
pipe.send("terminate")
if __name__ == '__main__':
run_single(int(sys.argv[1]))
| true
| true
|
f7054fb9122add1a551c527b03a0139ed75b600b
| 416
|
py
|
Python
|
aserializer/django/utils.py
|
orderbird/aserializer
|
3aeaa073f2dac7830458a1f45ffa9af6540bd315
|
[
"MIT"
] | null | null | null |
aserializer/django/utils.py
|
orderbird/aserializer
|
3aeaa073f2dac7830458a1f45ffa9af6540bd315
|
[
"MIT"
] | null | null | null |
aserializer/django/utils.py
|
orderbird/aserializer
|
3aeaa073f2dac7830458a1f45ffa9af6540bd315
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
try:
import django
except ImportError as e:
django = None
django_import_error = e
def check_django_import():
if django is None:
raise django_import_error
class django_required(object):
def __call__(self, func):
def wrapper(self, *args, **kwargs):
check_django_import()
return func(self, *args, **kwargs)
return wrapper
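# Illustrative usage (class and method names are hypothetical): instantiate the
# decorator so the wrapped method re-raises the captured ImportError only when
# Django is actually needed, e.g.
#
#   class QuerySetSerializer(object):
#       @django_required()
#       def serialize(self, qs):
#           ...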
| 19.809524
| 46
| 0.625
|
try:
import django
except ImportError as e:
django = None
django_import_error = e
def check_django_import():
if django is None:
raise django_import_error
class django_required(object):
def __call__(self, func):
def wrapper(self, *args, **kwargs):
check_django_import()
return func(self, *args, **kwargs)
return wrapper
| true
| true
|
f705510f848be122cef73d6a694e14dd0d464839
| 111,947
|
py
|
Python
|
python/paddle/tensor/math.py
|
LemonNoel/Paddle
|
1cb511d1488bb86ebb587330902840cb01c79c0d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/tensor/math.py
|
LemonNoel/Paddle
|
1cb511d1488bb86ebb587330902840cb01c79c0d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/tensor/math.py
|
LemonNoel/Paddle
|
1cb511d1488bb86ebb587330902840cb01c79c0d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
math functions
"""
from __future__ import print_function
import numpy as np
from paddle.common_ops_import import VarDesc
from paddle.common_ops_import import dygraph_only
from paddle.common_ops_import import OpProtoHolder
from paddle.common_ops_import import templatedoc
from paddle.common_ops_import import dygraph_utils
from paddle.tensor import cast
from paddle.tensor.attribute import _complex_to_real_dtype
import paddle
from ..fluid import layers
from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable, convert_np_dtype_to_dtype_
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from ..fluid.layers.layer_function_generator import _generate_doc_string_, generate_activation_fn, generate_layer_fn
from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
# TODO: define math functions
# yapf: disable
from ..fluid.layers import abs # noqa: F401
from ..fluid.layers import acos # noqa: F401
from ..fluid.layers import asin # noqa: F401
from ..fluid.layers import ceil # noqa: F401
from ..fluid.layers import ceil_ # noqa: F401
from ..fluid.layers import cos # noqa: F401
from ..fluid.layers import tan # noqa: F401
from ..fluid.layers import sinh # noqa: F401
from ..fluid.layers import cosh # noqa: F401
from ..fluid.layers import exp # noqa: F401
from ..fluid.layers import exp_ # noqa: F401
from ..fluid.layers import expm1 # noqa: F401
from ..fluid.layers import floor # noqa: F401
from ..fluid.layers import floor_ # noqa: F401
from ..fluid.layers import log # noqa: F401
from ..fluid.layers import reciprocal # noqa: F401
from ..fluid.layers import reciprocal_ # noqa: F401
from ..fluid.layers import round # noqa: F401
from ..fluid.layers import round_ # noqa: F401
from ..fluid.layers import rsqrt # noqa: F401
from ..fluid.layers import rsqrt_ # noqa: F401
from ..fluid.layers import scale # noqa: F401
from ..fluid.layers import square # noqa: F401
from ..fluid.layers import stanh # noqa: F401
from ..fluid.layers import atan # noqa: F401
from ..fluid.layers import erf # noqa: F401
from ..fluid.layers import sqrt # noqa: F401
from ..fluid.layers import sqrt_ # noqa: F401
from ..fluid.layers import sin # noqa: F401
from ..fluid.layers import lgamma # noqa: F401
from ..fluid.layers import multiplex # noqa: F401
from ..fluid import layers
from paddle import _C_ops
__all__ = []
_supported_int_dtype_ = [
VarDesc.VarType.UINT8,
VarDesc.VarType.INT8,
VarDesc.VarType.INT16,
VarDesc.VarType.INT32,
VarDesc.VarType.INT64,
]
_supported_float_dtype_ = [
VarDesc.VarType.FP32,
VarDesc.VarType.FP64,
]
@inplace_apis_in_dygraph_only
def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
"""
Inplace version of ``scale`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_scale`.
"""
_scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
return _C_ops.scale_(x, 'scale',
float(_scale), 'bias',
float(bias), 'bias_after_scale', bias_after_scale)
def pow(x, y, name=None):
"""
Compute the power of tensor elements. The equation is:
.. math::
out = x^{y}
**Note**:
    ``paddle.pow`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
x (Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.
y (float|int|Tensor): If it is an N-D Tensor, its data type should be the same as `x`.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. Its dimension and data type are the same as `x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3], dtype='float32')
# example 1: y is a float or int
res = paddle.pow(x, 2)
print(res)
# Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [1., 4., 9.])
res = paddle.pow(x, 2.5)
print(res)
# Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [1. , 5.65685415 , 15.58845711])
# example 2: y is a Tensor
y = paddle.to_tensor([2], dtype='float32')
res = paddle.pow(x, y)
print(res)
# Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [1., 4., 9.])
"""
# in dynamic graph mode
if in_dygraph_mode():
if isinstance(y, (int, float)):
return _C_ops.pow(x, 'factor', y)
elif isinstance(y, (paddle.Tensor, Variable)):
return _elementwise_op_in_dygraph(
x, y, axis=-1, act=None, op_name='elementwise_pow')
else:
            raise TypeError('y must be scalar or tensor type, but received: %s ' % (type(y)))
# in static graph mode
else:
if isinstance(y, (int, float)):
helper = LayerHelper('pow', **locals())
inputs = {'X': x}
attrs = {'factor': y}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
elif isinstance(y, (paddle.Tensor, Variable)):
# TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
helper = LayerHelper('elementwise_pow', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
@dygraph_only
def _elementwise_op_in_dygraph(x,
y,
axis=-1,
act=None,
use_mkldnn=False,
op_name=None):
op = getattr(_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn)
def _elementwise_op(helper):
op_type = helper.layer_type
original_op_type = helper.kwargs.get('original_op_type', op_type)
x = helper.kwargs.get('x', None)
y = helper.kwargs.get('y', None)
out = helper.kwargs.get('out', None)
assert x is not None, 'x cannot be None in {}'.format(original_op_type)
assert y is not None, 'y cannot be None in {}'.format(original_op_type)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
original_op_type)
check_variable_and_dtype(
y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
original_op_type)
axis = helper.kwargs.get('axis', -1)
use_mkldnn = helper.kwargs.get('use_mkldnn', False)
name = helper.kwargs.get('name', None)
if out is None:
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type=op_type,
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'axis': axis,
'use_mkldnn': use_mkldnn})
return helper.append_activation(out)
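# Note: the public element-wise APIs below (add, subtract, divide, floor_divide,
# remainder, multiply, maximum, minimum, ...) all funnel through
# _elementwise_op_in_dygraph in dynamic graph mode and through _elementwise_op
# when building a static graph program.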
def add(x, y, name=None):
"""
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 4], 'float64')
y = paddle.to_tensor([1, 5, 2], 'float64')
z = paddle.add(x, y)
print(z) # [3., 8., 6. ]
"""
if in_dygraph_mode():
return _C_ops.elementwise_add(x, y)
return _elementwise_op(LayerHelper('elementwise_add', **locals()))
@inplace_apis_in_dygraph_only
def add_(x, y, name=None):
"""
Inplace version of ``add`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_add`.
"""
op_type = 'elementwise_add_'
axis = -1
out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
out = _elementwise_op_in_dygraph(
x, y, axis=axis, op_name=op_type)
return out
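# Illustrative constraint for the in-place variants (shapes are hypothetical):
# the broadcast result must keep x's shape, e.g.
#   paddle.ones([2, 3]).add_(paddle.ones([3]))   # fine, result keeps shape [2, 3]
#   paddle.ones([3]).add_(paddle.ones([2, 3]))   # raises ValueError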
def subtract(x, y, name=None):
"""
    Subtract two tensors element-wise. The equation is:
.. math::
out = x - y
**Note**:
    ``paddle.subtract`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[5, 6], [3, 4]])
res = paddle.subtract(x, y)
print(res)
# [[-4, -4],
# [4, 4]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([1, 0, 4])
res = paddle.subtract(x, y)
print(res)
# [[[ 0, 2, -1],
# [ 0, 2, -1]]]
x = paddle.to_tensor([2, np.nan, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.subtract(x, y)
print(res)
# [ 1., nan, nan]
x = paddle.to_tensor([5, np.inf, -np.inf], dtype='float64')
y = paddle.to_tensor([1, 4, 5], dtype='float64')
res = paddle.subtract(x, y)
print(res)
# [ 4., inf., -inf.]
"""
op_type = 'elementwise_sub'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
@inplace_apis_in_dygraph_only
def subtract_(x, y, name=None):
"""
Inplace version of ``subtract`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_subtract`.
"""
axis = -1
act = None
out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
out = _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_sub_')
return out
def divide(x, y, name=None):
"""
Divide two tensors element-wise. The equation is:
.. math::
out = x / y
**Note**:
    ``paddle.divide`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 4], dtype='float64')
y = paddle.to_tensor([1, 5, 2], dtype='float64')
z = paddle.divide(x, y)
print(z) # [2., 0.6, 2.]
"""
op_type = 'elementwise_div'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def floor_divide(x, y, name=None):
"""
Floor divide two tensors element-wise. The equation is:
.. math::
out = x // y
**Note**:
    ``paddle.floor_divide`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
        x (Tensor): the input tensor, its data type should be int32, int64.
        y (Tensor): the input tensor, its data type should be int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of $x$.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 8, 7])
y = paddle.to_tensor([1, 5, 3, 3])
z = paddle.floor_divide(x, y)
print(z) # [2, 0, 2, 2]
"""
op_type = 'elementwise_floordiv'
axis = -1
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def remainder(x, y, name=None):
r"""
Mod two tensors element-wise. The equation is:
.. math::
out = x \% y
**Note**:
    ``paddle.remainder`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 8, 7])
y = paddle.to_tensor([1, 5, 3, 3])
z = paddle.remainder(x, y)
print(z) # [0, 3, 2, 1]
"""
op_type = 'elementwise_mod'
axis = -1
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
mod = remainder # noqa: F841
floor_mod = remainder # noqa: F841
def multiply(x, y, name=None):
"""
    Multiply two tensors element-wise. The equation is:
.. math::
out = x * y
**Note**:
``paddle.multiply`` supports broadcasting. If you would like to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args:
x (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.
y (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2], [3, 4]])
y = paddle.to_tensor([[5, 6], [7, 8]])
res = paddle.multiply(x, y)
print(res) # [[5, 12], [21, 32]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([2])
res = paddle.multiply(x, y)
print(res) # [[[2, 4, 6], [2, 4, 6]]]
"""
op_type = 'elementwise_mul'
act = None
axis = -1
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
if x.dtype != y.dtype:
raise TypeError(
'Input tensors must be same type, but received type of x: %s, type of y: %s '
% (x.dtype, y.dtype))
return _elementwise_op(LayerHelper(op_type, **locals()))
def maximum(x, y, name=None):
"""
Compare two tensors and returns a new tensor containing the element-wise maxima. The equation is:
.. math::
out = max(x, y)
**Note**:
    ``paddle.maximum`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[3, 4], [5, 6]])
res = paddle.maximum(x, y)
print(res)
# [[3, 4],
# [7, 8]]
x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
y = paddle.to_tensor([3, 0, 4])
res = paddle.maximum(x, y)
print(res)
# [[3, 2, 4],
# [3, 2, 4]]
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
res = paddle.maximum(x, y)
print(res)
# [ 2., nan, nan]
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, -np.inf, 5], dtype='float32')
res = paddle.maximum(x, y)
print(res)
# [ 5., 3., inf.]
"""
op_type = 'elementwise_max'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def minimum(x, y, name=None):
"""
Compare two tensors and returns a new tensor containing the element-wise minima. The equation is:
.. math::
out = min(x, y)
**Note**:
    ``paddle.minimum`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[3, 4], [5, 6]])
res = paddle.minimum(x, y)
print(res)
# [[1, 2],
# [5, 6]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([3, 0, 4])
res = paddle.minimum(x, y)
print(res)
# [[[1, 0, 3],
# [1, 0, 3]]]
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
res = paddle.minimum(x, y)
print(res)
# [ 1., nan, nan]
x = paddle.to_tensor([5, 3, np.inf], dtype='float64')
y = paddle.to_tensor([1, -np.inf, 5], dtype='float64')
res = paddle.minimum(x, y)
print(res)
# [ 1., -inf., 5.]
"""
op_type = 'elementwise_min'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
for func in [
add,
multiply
]:
proto_dict = {'add': 'elementwise_add', 'multiply': 'elementwise_mul'}
op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__])
additional_args_lines = [
"name (string, optional): Name of the output. \
Default is None. It's used to print debug info for developers. Details: \
:ref:`api_guide_Name` "
]
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=additional_args_lines,
skip_attrs_set={"x_data_format", "y_data_format", "axis",
"use_quantizer", "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out"
}) + """\n""" + str(func.__doc__)
def sum(x, axis=None, dtype=None, keepdim=False, name=None):
"""
Computes the sum of tensor elements over the given dimension.
Args:
x (Tensor): An N-D Tensor, the data type is bool, float16, float32, float64, int32 or int64.
axis (int|list|tuple, optional): The dimensions along which the sum is performed. If
:attr:`None`, sum all elements of :attr:`x` and return a
Tensor with a single element, otherwise must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`.
dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype
of output is the same as input Tensor `x`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result Tensor will have one fewer dimension
than the :attr:`x` unless :attr:`keepdim` is true, default
value is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: Results of summation operation on the specified axis of input Tensor `x`,
            if `x.dtype='bool'` or `x.dtype='int32'`, its data type is `'int64'`,
            otherwise its data type is the same as `x`.
Raises:
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
# x is a Tensor with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
out1 = paddle.sum(x) # [3.5]
out2 = paddle.sum(x, axis=0) # [0.3, 0.5, 1.1, 1.6]
out3 = paddle.sum(x, axis=-1) # [1.9, 1.6]
out4 = paddle.sum(x, axis=1, keepdim=True) # [[1.9], [1.6]]
# y is a Tensor with shape [2, 2, 2] and elements as below:
# [[[1, 2], [3, 4]],
# [[5, 6], [7, 8]]]
# Each example is followed by the corresponding output tensor.
y = paddle.to_tensor([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]])
out5 = paddle.sum(y, axis=[1, 2]) # [10, 26]
out6 = paddle.sum(y, axis=[0, 1]) # [16, 20]
# x is a Tensor with following elements:
# [[True, True, True, True]
# [False, False, False, False]]
# Each example is followed by the corresponding output tensor.
x = paddle.to_tensor([[True, True, True, True],
[False, False, False, False]])
out7 = paddle.sum(x) # [4]
out8 = paddle.sum(x, axis=0) # [1, 1, 1, 1]
out9 = paddle.sum(x, axis=1) # [4, 0]
"""
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if not axis:
reduce_all_flag = True
else:
if len(axis) == len(x.shape):
reduce_all_flag = True
else:
reduce_all_flag = False
def get_dtype(x, dtype):
if dtype is not None:
return (True, dtype)
src_type = convert_dtype(x.dtype)
if src_type in ['bool','int32', 'int64']:
return (True, 'int64')
return (False, src_type)
dtype_flag, dtype = get_dtype(x, dtype)
if in_dygraph_mode():
axis = axis if axis != None and axis != [] else [0]
if dtype_flag:
return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag, 'in_dtype',
x.dtype, 'out_dtype',
convert_np_dtype_to_dtype_(dtype))
else:
return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
if dtype_flag:
attrs.update({
'in_dtype': x.dtype,
'out_dtype': convert_np_dtype_to_dtype_(dtype)
})
check_variable_and_dtype(
x, 'x', ['bool', 'float16', 'float32', 'float64',
'int32', 'int64', 'complex64', 'complex128',
u'bool', u'float16', u'float32', u'float64',
u'int32', u'int64', u'complex64', u'complex128'], 'sum')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'sum')
helper = LayerHelper('sum', **locals())
if dtype_flag:
out = helper.create_variable_for_type_inference(
dtype=convert_np_dtype_to_dtype_(dtype))
else:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_sum',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
@templatedoc(op_type="sum")
def add_n(inputs, name=None):
"""
    This OP is used to sum one or more Tensors of the input.
For example:
.. code-block:: text
Case 1:
Input:
input.shape = [2, 3]
input = [[1, 2, 3],
[4, 5, 6]]
Output:
output.shape = [2, 3]
output = [[1, 2, 3],
[4, 5, 6]]
Case 2:
Input:
First input:
input1.shape = [2, 3]
Input1 = [[1, 2, 3],
[4, 5, 6]]
The second input:
input2.shape = [2, 3]
input2 = [[7, 8, 9],
[10, 11, 12]]
Output:
output.shape = [2, 3]
output = [[8, 10, 12],
[14, 16, 18]]
Args:
inputs (Tensor|list[Tensor]|tuple[Tensor]): A Tensor or a list/tuple of Tensors. The shape and data type of the list/tuple elements should be consistent.
Input can be multi-dimensional Tensor, and data types can be: float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor, the sum of input :math:`inputs` , its shape and data types are consistent with :math:`inputs`.
Examples:
.. code-block:: python
import paddle
input0 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
input1 = paddle.to_tensor([[7, 8, 9], [10, 11, 12]], dtype='float32')
output = paddle.add_n([input0, input1])
# [[8., 10., 12.],
# [14., 16., 18.]]
"""
if in_dygraph_mode():
if isinstance(inputs, Variable):
inputs = [inputs]
return _C_ops.sum(inputs, 'use_mkldnn', False)
helper = LayerHelper('add_n', **locals())
check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
if isinstance(inputs, list) or isinstance(inputs, tuple):
if len(inputs) > 0:
for input in inputs:
check_variable_and_dtype(input, "inputs", \
['float32', 'float64', 'int32', 'int64'], 'add_n')
else:
check_variable_and_dtype(inputs, "inputs", \
['float32', 'float64', 'int32', 'int64'], 'add_n')
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('inputs'))
helper.append_op(
type='sum',
inputs={'X': inputs},
outputs={'Out': out},
attrs={'use_mkldnn': False})
return out
def trunc(input, name=None):
'''
    This API is used to return a new tensor with the truncated integer values of input.
Args:
        input (Tensor): The input tensor, its data type should be int32, int64, float32, float64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The output Tensor of trunc.
Examples:
.. code-block:: python
import paddle
input = paddle.rand([2,2],'float32')
print(input)
# Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.02331470, 0.42374918],
# [0.79647720, 0.74970269]])
output = paddle.trunc(input)
print(output)
# Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0., 0.],
# [0., 0.]]))
'''
if in_dygraph_mode():
return _C_ops.trunc(input)
else:
inputs = {"X": input}
attrs = {}
helper = LayerHelper("trunc", **locals())
check_variable_and_dtype(input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc')
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out})
return out
def mm(input, mat2, name=None):
"""
Applies matrix multiplication to two tensors.
    Currently, the input tensors' rank can be arbitrary, but when the rank of any
    input is bigger than 3, the two inputs' ranks should be equal.
Also note that if the raw tensor :math:`x` or :math:`mat2` is rank-1 and
nontransposed, the prepended or appended dimension :math:`1` will be
removed after matrix multiplication.
Args:
input (Tensor): The input tensor which is a Tensor.
mat2 (Tensor): The input tensor which is a Tensor.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The product Tensor.
Examples:
.. code-block:: python
import paddle
input = paddle.arange(1, 7).reshape((3, 2)).astype('float32')
mat2 = paddle.arange(1, 9).reshape((2, 4)).astype('float32')
out = paddle.mm(input, mat2)
print(out)
# [[11., 14., 17., 20.],
# [23., 30., 37., 44.],
# [35., 46., 57., 68.]])
"""
if in_dygraph_mode():
return _C_ops.matmul_v2(input, mat2)
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(val, name,
['float16', 'float32', 'float64'], 'mm')
x_shape = list(x.shape)
y_shape = list(y.shape)
if len(x_shape) == 1:
x_shape = [1] + x_shape
if len(y_shape) == 1:
y_shape = y_shape + [1]
# check the inner 2 dimensions
if x_shape[-1] != y_shape[-2]:
if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
raise ValueError(
"After performing an optional transpose, Input X's width should be "
"equal to Y's width for multiplication "
"prerequisites. But received X's shape: %s, Y's shape: %s\n"
% (x_shape, y_shape))
if len(y_shape) > 2 and len(x_shape) > 2:
for i, dim_x in enumerate(x_shape[:-2]):
# don't check neg shape
if dim_x < 0 or y_shape[i] < 0:
continue
if dim_x != y_shape[i]:
raise ValueError(
"When the matrix is larger than 2 dimensions, the higher "
"dimensional values of the two matrices need to be equal. "
"But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
"Y's shape: %s.\n" % (i, i, x_shape, y_shape))
__check_input(input, mat2)
helper = LayerHelper('mm', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='matmul_v2', inputs={'X': input,
'Y': mat2}, outputs={'Out': out})
return out
def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
"""
**addmm**
This operator is used to perform matrix multiplication for input $x$ and $y$.
$input$ is added to the final result.
The equation is:
.. math::
Out = alpha * x * y + beta * input
$Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $input$.
Args:
input (Tensor): The input Tensor to be added to the final result.
x (Tensor): The first input Tensor for matrix multiplication.
y (Tensor): The second input Tensor for matrix multiplication.
beta (float): Coefficient of $input$.
alpha (float): Coefficient of $x*y$.
name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
Tensor: The output Tensor of addmm op.
Examples:
.. code-block:: python
import paddle
x = paddle.ones([2,2])
y = paddle.ones([2,2])
input = paddle.ones([2,2])
out = paddle.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
print(out)
# [[10.5 10.5]
# [10.5 10.5]]
"""
input_shape = input.shape
x_shape = x.shape
y_shape = y.shape
if not len(input_shape) == len(x_shape) == len(y_shape) == 2:
raise ValueError("The dimention of input, x, y should be 2 but receive input's shape: {}, x's shape: {}, y's shape: {}".format(input_shape, x_shape, y_shape))
    if input_shape[0] != x_shape[0]:
        if input_shape[0] != 1:
            raise ValueError("When x's dimension[0] is not equal to input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
        if input_shape[1] != y_shape[1] and input_shape[1] != 1:
            raise ValueError("When y's dimension[1] is not equal to input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
    if input_shape[1] != y_shape[1]:
        if input_shape[1] != 1:
            raise ValueError("When y's dimension[1] is not equal to input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
        if input_shape[0] != x_shape[0] and input_shape[0] != 1:
            raise ValueError("When x's dimension[0] is not equal to input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
if x_shape[1] != y_shape[0]:
raise ValueError("The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}.".format(x_shape, y_shape))
if in_dygraph_mode():
out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
return out
inputs = {'Input': input, "X": x, "Y": y}
attrs = {'Alpha': alpha, 'Beta': beta}
helper = LayerHelper("addmm", **locals())
check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out})
return out
def logsumexp(x, axis=None, keepdim=False, name=None):
r"""
This OP calculates the log of the sum of exponentials of ``x`` along ``axis`` .
.. math::
       logsumexp(x) = \log \sum exp(x)
Args:
        x (Tensor): The input Tensor with data type float32 or float64, which
            has no more than 4 dimensions.
axis (int|list|tuple, optional): The axis along which to perform
logsumexp calculations. ``axis`` should be int, list(int) or
tuple(int). If ``axis`` is a list/tuple of dimension(s), logsumexp
is calculated along all element(s) of ``axis`` . ``axis`` or
element(s) of ``axis`` should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is
less than 0, it works the same way as :math:`axis + D` . If
``axis`` is None, logsumexp is calculated along all elements of
``x``. Default is None.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keep_dim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of logsumexp along ``axis`` of ``x``, with the same data
type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[-1.5, 0., 2.], [3., 1.2, -2.4]])
out1 = paddle.logsumexp(x) # [3.4691226]
out2 = paddle.logsumexp(x, 1) # [2.15317821, 3.15684602]
"""
if isinstance(axis, int):
axis = [axis]
reduce_all = True if axis is None \
or len(axis)==0 \
or len(axis) == len(x.shape) else False
if axis is None or len(axis) == 0:
axis = [0]
if in_dygraph_mode():
return _C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
check_variable_and_dtype(x, 'x',
['float32', 'float64'],
'logsumexp')
helper = LayerHelper('logsumexp', **locals())
attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all':reduce_all}
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
return out
def inverse(x, name=None):
"""
Takes the inverse of the square matrix. A square matrix is a matrix with
the same number of rows and columns. The input can be a square matrix
(2-D Tensor) or batches of square matrices.
Args:
x (Tensor): The input tensor. The last two
dimensions should be equal. When the number of dimensions is
greater than 2, it is treated as batches of square matrix. The data
type can be float32 and float64.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Tensor: A Tensor holds the inverse of x. The shape and data type
is the same as x.
Examples:
.. code-block:: python
import paddle
mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32')
inv = paddle.inverse(mat)
print(inv) # [[0.5, 0], [0, 0.5]]
"""
if in_dygraph_mode():
return _C_ops.inverse(x)
def _check_input(x):
check_variable_and_dtype(x, 'x',
['float32', 'float64'], 'inverse')
if len(x.shape) < 2:
raise ValueError(
"The input of inverse is expected to be a Tensor whose number "
"of dimensions is no less than 2. But reviced: %d, "
"x's shape: %s." % (len(x.shape), x.shape))
_check_input(x)
helper = LayerHelper('inverse', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]})
return out
def max(x, axis=None, keepdim=False, name=None):
"""
Computes the maximum of tensor elements over the given axis.
Args:
x(Tensor): A tensor, the data type is float32,
float64, int32, int64.
axis(int|list|tuple, optional): The axis along which the maximum is computed.
If :attr:`None`, compute the maximum over all elements of
`x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
keepdim(bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the `x` unless :attr:`keepdim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor, results of maximum on the specified axis of input tensor,
        its data type is the same as `x`.
Examples:
.. code-block:: python
import paddle
# data_x is a Tensor with shape [2, 4]
# the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.max(x)
print(result1)
#[0.9]
result2 = paddle.max(x, axis=0)
print(result2)
#[0.2 0.3 0.6 0.9]
result3 = paddle.max(x, axis=-1)
print(result3)
#[0.9 0.7]
result4 = paddle.max(x, axis=1, keepdim=True)
print(result4)
#[[0.9]
# [0.7]]
# data_y is a Tensor with shape [2, 2, 2]
# the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.max(y, axis=[1, 2])
print(result5)
#[4. 8.]
result6 = paddle.max(y, axis=[0, 1])
print(result6)
#[7. 8.]
"""
if axis is not None and not isinstance(axis, list):
if isinstance(axis, tuple):
axis = list(axis)
elif isinstance(axis, int):
axis= [axis]
else:
raise TypeError(
"The type of axis must be int, list or tuple, but received {}".format(type(axis)))
reduce_all = True if axis == None or axis == [] else False
axis = axis if axis != None and axis != [] else [0]
if in_dygraph_mode():
return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('max', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max')
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_max',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def min(x, axis=None, keepdim=False, name=None):
"""
Computes the minimum of tensor elements over the given axis
Args:
x(Tensor): A tensor, the data type is float32, float64, int32, int64.
axis(int|list|tuple, optional): The axis along which the minimum is computed.
If :attr:`None`, compute the minimum over all elements of
`x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
keepdim(bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the `x` unless :attr:`keepdim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        Tensor, results of minimum on the specified axis of input tensor,
        its data type is the same as the input Tensor's.
Examples:
.. code-block:: python
import paddle
# x is a tensor with shape [2, 4]
# the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.min(x)
print(result1)
#[0.1]
result2 = paddle.min(x, axis=0)
print(result2)
#[0.1 0.2 0.5 0.7]
result3 = paddle.min(x, axis=-1)
print(result3)
#[0.2 0.1]
result4 = paddle.min(x, axis=1, keepdim=True)
print(result4)
#[[0.2]
# [0.1]]
# y is a Tensor with shape [2, 2, 2]
# the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.min(y, axis=[1, 2])
print(result5)
#[1. 5.]
result6 = paddle.min(y, axis=[0, 1])
print(result6)
#[1. 2.]
"""
if axis is not None and not isinstance(axis, list):
if isinstance(axis, tuple):
axis = list(axis)
elif isinstance(axis, int):
axis= [axis]
else:
raise TypeError(
"The type of axis must be int, list or tuple, but received {}".format(type(axis)))
reduce_all = True if axis == None or axis == [] else False
axis = axis if axis != None and axis != [] else [0]
if in_dygraph_mode():
return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('min', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min')
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_min',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def log1p(x, name=None):
r"""
Calculates the natural log of the given input tensor, element-wise.
.. math::
        Out = \ln(x+1)
Args:
x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor, the natural log of the input Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([[0], [1]], dtype='float32')
res = paddle.log1p(data)
# [[0.], [0.6931472]]
"""
if in_dygraph_mode():
return _C_ops.log1p(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
inputs = {'X': [x]}
helper = LayerHelper('log1p', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
return out
def log2(x, name=None):
r"""
Calculates the log to the base 2 of the given input tensor, element-wise.
.. math::
        Out = \log_2 x
Args:
x (Tensor): Input tensor must be one of the following types: float32, float64.
name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The log to the base 2 of the input Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle
# example 1: x is a float
x_i = paddle.to_tensor([[1.0], [2.0]])
res = paddle.log2(x_i) # [[0.], [1.0]]
# example 2: x is float32
x_i = paddle.full(shape=[1], fill_value=2, dtype='float32')
paddle.to_tensor(x_i)
res = paddle.log2(x_i)
print(res) # [1.0]
# example 3: x is float64
x_i = paddle.full(shape=[1], fill_value=2, dtype='float64')
paddle.to_tensor(x_i)
res = paddle.log2(x_i)
print(res) # [1.0]
"""
if in_dygraph_mode():
return _C_ops.log2(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
inputs = {'X': [x]}
helper = LayerHelper('log2', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log2", inputs={"X": x}, outputs={"Out": out})
return out
def log10(x, name=None):
r"""
Calculates the log to the base 10 of the given input tensor, element-wise.
.. math::
        Out = \log_{10} x
Args:
x (Tensor): Input tensor must be one of the following types: float32, float64.
name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The log to the base 10 of the input Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle
# example 1: x is a float
x_i = paddle.to_tensor([[1.0], [10.0]])
res = paddle.log10(x_i) # [[0.], [1.0]]
# example 2: x is float32
x_i = paddle.full(shape=[1], fill_value=10, dtype='float32')
paddle.to_tensor(x_i)
res = paddle.log10(x_i)
print(res) # [1.0]
# example 3: x is float64
x_i = paddle.full(shape=[1], fill_value=10, dtype='float64')
paddle.to_tensor(x_i)
res = paddle.log10(x_i)
print(res) # [1.0]
"""
if in_dygraph_mode():
return _C_ops.log10(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
inputs = {'X': [x]}
helper = LayerHelper('log10', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log10", inputs={"X": x}, outputs={"Out": out})
return out
def clip(x, min=None, max=None, name=None):
"""
    This operator clips all elements of the input into the range [min, max] and returns
    a resulting tensor as the following equation shows:
.. math::
Out = MIN(MAX(x, min), max)
Args:
x (Tensor): An N-D Tensor with data type float32, float64, int32 or int64.
min (float|int|Tensor): The lower bound with type ``float`` , ``int`` or a ``Tensor``
with shape [1] and type ``int32``, ``float32``, ``float64``.
max (float|int|Tensor): The upper bound with type ``float``, ``int`` or a ``Tensor``
with shape [1] and type ``int32``, ``float32``, ``float64``.
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor with the same data type and data shape as input.
Examples:
.. code-block:: python
import paddle
x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32')
out1 = paddle.clip(x1, min=3.5, max=5.0)
out2 = paddle.clip(x1, min=2.5)
print(out1)
# [[3.5, 3.5]
# [4.5, 5.0]]
print(out2)
# [[2.5, 3.5]
            #  [4.5, 6.4]]
"""
x_dtype = str(x.dtype)
if x_dtype == 'paddle.int32':
min_ = np.iinfo(np.int32).min
max_ = np.iinfo(np.int32).max - 2**7
elif x_dtype == 'paddle.int64':
min_ = np.iinfo(np.int64).min
max_ = np.iinfo(np.int64).max - 2**39
else:
min_ = float(np.finfo(np.float32).min)
max_ = float(np.finfo(np.float32).max)
if in_dygraph_mode():
if isinstance(min, Variable):
min = min.numpy().item(0)
if isinstance(max, Variable):
max = max.numpy().item(0)
min = min_ if min is None else min
max = max_ if max is None else max
return _C_ops.clip(x, "min", min, "max", max)
if min is not None:
check_type(min, 'min', (float, int, Variable), 'clip')
if isinstance(min, Variable):
check_dtype(min.dtype, 'min', ['float32', 'float64', 'int32'],
'clip', '(When the type of min in clip is Variable.)')
if max is not None:
check_type(max, 'max', (float, int, Variable), 'clip')
if isinstance(max, Variable):
check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],
'clip', '(When the type of max in clip is Variable.)')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip')
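    # Tensor bounds are wired in as op inputs (Min/Max) with their gradients stopped;
    # scalar bounds are passed as attributes instead.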
inputs = {'X': x}
attrs = {'min': min_, 'max': max_}
if isinstance(min, Variable):
min.stop_gradient = True
inputs['Min'] = min
elif min is not None:
attrs['min'] = min
if isinstance(max, Variable):
max.stop_gradient = True
inputs['Max'] = max
elif max is not None:
attrs['max'] = max
helper = LayerHelper('clip', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('x'))
helper.append_op(
type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs)
return output
@inplace_apis_in_dygraph_only
def clip_(x, min=None, max=None, name=None):
"""
Inplace version of ``clip`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_clip`.
"""
fmin = float(np.finfo(np.float32).min)
fmax = float(np.finfo(np.float32).max)
if isinstance(min, Variable):
min = min.numpy().item(0)
if isinstance(max, Variable):
max = max.numpy().item(0)
min = fmin if min is None else min
max = fmax if max is None else max
return _C_ops.clip_(x, "min", min, "max", max)
def trace(x, offset=0, axis1=0, axis2=1, name=None):
"""
**trace**
This OP computes the sum along diagonals of the input tensor x.
If ``x`` is 2D, returns the sum of diagonal.
    If ``x`` has more dimensions, it returns a tensor of sums of diagonals, with the diagonals taken from
    the 2D planes specified by axis1 and axis2. By default, the 2D planes are formed by the first and second axes
    of the input tensor x.
The argument ``offset`` determines where diagonals are taken from input tensor x:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
- Note that if offset is out of input's shape indicated by axis1 and axis2, 0 will be returned.
Args:
x(Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be float32, float64, int32, int64.
offset(int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).
axis1(int, optional): The first axis with respect to take diagonal. Default: 0.
axis2(int, optional): The second axis with respect to take diagonal. Default: 1.
name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.
Returns:
Tensor: the output data type is the same as input data type.
Examples:
.. code-block:: python
import paddle
case1 = paddle.randn([2, 3])
case2 = paddle.randn([3, 10, 10])
case3 = paddle.randn([3, 10, 5, 10])
data1 = paddle.trace(case1) # data1.shape = [1]
data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
            data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data3.shape = [3, 5]
"""
def __check_input(input, offset, dim1, dim2):
check_dtype(x.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
'trace')
input_shape = list(x.shape)
assert len(input_shape) >= 2, \
"The x must be at least 2-dimensional, " \
"But received Input x's dimensional: %s.\n" % \
len(input_shape)
axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2
assert ((0 <= axis1_) and (axis1_ < len(input_shape))), \
"The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis1)
assert ((0 <= axis2_) and (axis2_ < len(input_shape))), \
"The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis2)
assert axis1_ != axis2_, \
"axis1 and axis2 cannot be the same axis." \
"But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
__check_input(input, offset, axis1, axis2)
if in_dygraph_mode():
return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
inputs = {'Input': [x]}
attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}
helper = LayerHelper('trace', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='trace',
inputs={'Input': [x]},
attrs={'offset': offset,
'axis1': axis1,
'axis2': axis2},
outputs={'Out': [out]})
return out
def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
"""
This OP computes the diagonals of the input tensor x.
If ``x`` is 2D, returns the diagonal.
    If ``x`` has more dimensions, the diagonals are taken from the 2D planes specified by axis1 and axis2.
    By default, the 2D planes are formed by the first and second axes of the input tensor x.
The argument ``offset`` determines where diagonals are taken from input tensor x:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
Args:
x(Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be bool, int32, int64, float16, float32, float64.
offset(int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).
axis1(int, optional): The first axis with respect to take diagonal. Default: 0.
axis2(int, optional): The second axis with respect to take diagonal. Default: 1.
name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.
Returns:
        Tensor: a partial view of the input tensor in the specified two dimensions, the output data type is the same as input data type.
Examples:
.. code-block:: python
import paddle
x = paddle.rand([2,2,3],'float32')
print(x)
# Tensor(shape=[2, 2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[[0.45661032, 0.03751532, 0.90191704],
# [0.43760979, 0.86177313, 0.65221709]],
# [[0.17020577, 0.00259554, 0.28954273],
# [0.51795638, 0.27325270, 0.18117726]]])
out1 = paddle.diagonal(x)
print(out1)
#Tensor(shape=[3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.45661032, 0.51795638],
# [0.03751532, 0.27325270],
# [0.90191704, 0.18117726]])
out2 = paddle.diagonal(x, offset=0, axis1=2, axis2=1)
print(out2)
#Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.45661032, 0.86177313],
# [0.17020577, 0.27325270]])
out3 = paddle.diagonal(x, offset=1, axis1=0, axis2=1)
print(out3)
#Tensor(shape=[3, 1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.43760979],
# [0.86177313],
# [0.65221709]])
out4 = paddle.diagonal(x, offset=0, axis1=1, axis2=2)
print(out4)
#Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.45661032, 0.86177313],
# [0.17020577, 0.27325270]])
"""
if in_dygraph_mode():
return _C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
def __check_input(input, offset, dim1, dim2):
check_dtype(x.dtype, 'Input',
['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],
'diagonal')
input_shape = list(x.shape)
assert len(input_shape) >= 2, \
"The x must be at least 2-dimensional, " \
"But received Input x's dimensional: %s.\n" % \
len(input_shape)
axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2
assert axis1_ < len(input_shape), \
"The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis1)
assert axis2_ < len(input_shape), \
"The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis2)
assert axis1_ != axis2_, \
"axis1 and axis2 cannot be the same axis." \
"But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
__check_input(input, offset, axis1, axis2)
helper = LayerHelper('diagonal', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='diagonal',
inputs={'Input': [x]},
attrs={'offset': offset,
'axis1': axis1,
'axis2': axis2},
outputs={'Out': [out]})
return out
@templatedoc(op_type="kron")
def kron(x, y, name=None):
"""
${comment}
Args:
        x (Tensor): the first operand of kron op, data type: float16, float32,
float64, int32 or int64.
y (Tensor): the second operand of kron op, data type: float16,
float32, float64, int32 or int64. Its data type should be the same
with x.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
        Tensor: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data type is the same as x's.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2], [3, 4]], dtype='int64')
y = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='int64')
out = paddle.kron(x, y)
print(out)
# [[1, 2, 3, 2, 4, 6],
# [ 4, 5, 6, 8, 10, 12],
# [ 7, 8, 9, 14, 16, 18],
# [ 3, 6, 9, 4, 8, 12],
# [12, 15, 18, 16, 20, 24],
# [21, 24, 27, 28, 32, 36]])
"""
if in_dygraph_mode():
return _C_ops.kron(x, y)
helper = LayerHelper('kron', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
return out
def cumsum(x, axis=None, dtype=None, name=None):
"""
The cumulative sum of the elements along a given axis.
**Note**:
        The first element of the result is the same as the first element of the input.
Args:
x (Tensor): The input tensor needed to be cumsumed.
axis (int, optional): The dimension to accumulate along. -1 means the last dimension. The default (None) is to compute the cumsum over the flattened array.
dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the result of cumsum operator.
Examples:
.. code-block:: python
import paddle
data = paddle.arange(12)
data = paddle.reshape(data, (3, 4))
y = paddle.cumsum(data)
# [ 0 1 3 6 10 15 21 28 36 45 55 66]
y = paddle.cumsum(data, axis=0)
# [[ 0 1 2 3]
# [ 4 6 8 10]
# [12 15 18 21]]
y = paddle.cumsum(data, axis=-1)
# [[ 0 1 3 6]
# [ 4 9 15 22]
# [ 8 17 27 38]]
y = paddle.cumsum(data, dtype='float64')
print(y.dtype)
# VarType.FP64
"""
if axis is None:
flatten = True
else:
flatten = False
if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)
if in_dygraph_mode():
if axis is None:
return _C_ops.cumsum(x, 'flatten', flatten)
else:
return _C_ops.cumsum(x, 'axis', axis, 'flatten', flatten)
check_type(x, 'x', (Variable), 'cumsum')
locals_var = locals().copy()
kwargs = dict()
for name, val in locals_var.items():
if val is not None:
kwargs[name] = val
_cum_sum_ = generate_layer_fn('cumsum')
return _cum_sum_(**kwargs)
def cumprod(x, dim=None, dtype=None, name=None):
"""
Compute the cumulative product of the input tensor x along a given dimension dim.
**Note**:
The first element of the result is the same as the first element of the input.
Args:
x (Tensor): the input tensor need to be cumproded.
        dim (int): the dimension along which the input tensor will be accumulated. It needs to be in the range of [-x.rank, x.rank), where x.rank means the dimensions of the input tensor x and -1 means the last dimension.
dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64, complex64, complex128. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the result of cumprod operator.
Examples:
.. code-block:: python
import paddle
data = paddle.arange(12)
data = paddle.reshape(data, (3, 4))
# [[ 0 1 2 3 ]
# [ 4 5 6 7 ]
# [ 8 9 10 11]]
y = paddle.cumprod(data, dim=0)
# [[ 0 1 2 3]
# [ 0 5 12 21]
# [ 0 45 120 231]]
y = paddle.cumprod(data, dim=-1)
# [[ 0 0 0 0]
# [ 4 20 120 840]
# [ 8 72 720 7920]]
y = paddle.cumprod(data, dim=1, dtype='float64')
# [[ 0. 0. 0. 0.]
# [ 4. 20. 120. 840.]
# [ 8. 72. 720. 7920.]]
print(y.dtype)
# paddle.float64
"""
if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)
if in_dygraph_mode():
return _C_ops.cumprod(x, 'dim', dim)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod')
check_type(dim, 'dim', int, 'cumprod')
helper = LayerHelper('cumprod', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='cumprod', inputs={'X': x}, outputs={'Out': out}, attrs={'dim': dim})
return out
def isfinite(x, name=None):
"""
Return whether every element of input tensor is finite number or not.
Args:
x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
`Tensor`, the bool result which shows every element of `x` whether it is finite number or not.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isfinite(x)
print(out) # [False True True False True False False]
"""
if in_dygraph_mode():
return _C_ops.isfinite_v2(x)
helper = LayerHelper("isfinite_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')
out = helper.create_variable_for_type_inference('bool')
helper.append_op(type="isfinite_v2", inputs={"X": x}, outputs={"Out": out})
return out
def isinf(x, name=None):
"""
Return whether every element of input tensor is `+/-INF` or not.
Args:
x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
`Tensor`, the bool result which shows every element of `x` whether it is `+/-INF` or not.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isinf(x)
print(out) # [ True False False True False False False]
"""
if in_dygraph_mode():
return _C_ops.isinf_v2(x)
helper = LayerHelper("isinf_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
return out
def isnan(x, name=None):
"""
Return whether every element of input tensor is `NaN` or not.
Args:
x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
`Tensor`, the bool result which shows every element of `x` whether it is `NaN` or not.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isnan(x)
print(out) # [False False False False False True True]
"""
if in_dygraph_mode():
return _C_ops.isnan_v2(x)
helper = LayerHelper("isnan_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
return out
def prod(x, axis=None, keepdim=False, dtype=None, name=None):
"""
Compute the product of tensor elements over the given axis.
Args:
x(Tensor): The input tensor, its data type should be float32, float64, int32, int64.
axis(int|list|tuple, optional): The axis along which the product is computed. If :attr:`None`,
multiply all elements of `x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`,
the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.
        dtype(str|np.dtype, optional): The desired data type of returned tensor, can be float32, float64,
int32, int64. If specified, the input tensor is casted to dtype before operator performed.
This is very useful for avoiding data type overflows. The default value is None, the dtype
of output is the same as input Tensor `x`.
keepdim(bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result
tensor will have one fewer dimension than the input unless `keepdim` is true. Default is False.
name(string, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor, result of product on the specified dim of input tensor.
Raises:
ValueError: The :attr:`dtype` must be float32, float64, int32 or int64.
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
# the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
out1 = paddle.prod(x)
# [0.0002268]
out2 = paddle.prod(x, -1)
# [0.027 0.0084]
out3 = paddle.prod(x, 0)
# [0.02 0.06 0.3 0.63]
out4 = paddle.prod(x, 0, keepdim=True)
# [[0.02 0.06 0.3 0.63]]
out5 = paddle.prod(x, 0, dtype='int64')
# [0 0 0 0]
# the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
out6 = paddle.prod(y, [0, 1])
# [105. 384.]
out7 = paddle.prod(y, (1, 2))
# [ 24. 1680.]
"""
if dtype is not None:
check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod')
if x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)
return layers.reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)
def sign(x, name=None):
"""
This OP returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.
Args:
x(Tensor): The input tensor. The data type can be float16, float32 or float64.
name (str, optional): The default value is None. Normally there is no need for user to
set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The output sign tensor with identical shape and data type to the input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([3.0, 0.0, -2.0, 1.7], dtype='float32')
out = paddle.sign(x=x)
print(out) # [1.0, 0.0, -1.0, 1.0]
"""
if in_dygraph_mode():
return _C_ops.sign(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
helper = LayerHelper("sign", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
return out
def tanh(x, name=None):
r"""
Tanh Activation Operator.
.. math::
        out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}
Args:
x (Tensor): Input of Tanh operator, an N-D Tensor, with data type float32, float64 or float16.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Output of Tanh operator, a Tensor with same data type and shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.tanh(x)
print(out)
# [-0.37994896 -0.19737532 0.09966799 0.29131261]
"""
if in_dygraph_mode():
return _C_ops.tanh(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
check_type(x, 'x', (Variable), 'tanh')
helper = LayerHelper('tanh', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
return out
@inplace_apis_in_dygraph_only
def tanh_(x, name=None):
r"""
Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_tanh`.
"""
return _C_ops.tanh_(x)
def increment(x, value=1.0, name=None):
"""
The OP is usually used for control flow to increment the data of :attr:`x` by an amount :attr:`value`.
Notice that the number of elements in :attr:`x` must be equal to 1.
Args:
x (Tensor): A tensor that must always contain only one element, its data type supports float32, float64, int32 and int64.
value(float, optional): The amount to increment the data of :attr:`x`. Default: 1.0.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the elementwise-incremented tensor with the same shape and data type as :attr:`x`.
Examples:
.. code-block:: python
import paddle
data = paddle.zeros(shape=[1], dtype='float32')
counter = paddle.increment(data)
# [1.]
"""
if in_dygraph_mode():
return _C_ops.increment(x, 'step', value)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'increment')
helper = LayerHelper("increment", **locals())
helper.append_op(
type='increment',
inputs={'X': [x]},
outputs={'Out': [x]},
attrs={'step': float(value)})
return x
def all(x, axis=None, keepdim=False, name=None):
"""
    Computes the ``logical and`` of tensor elements over the given dimension.
Args:
x (Tensor): An N-D Tensor, the input data type should be `bool`.
        axis (int|list|tuple, optional): The dimensions along which the ``logical and`` is computed. If
            :attr:`None`, the ``logical and`` is computed over all elements of :attr:`x` and a
            Tensor with a single element is returned, otherwise it must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result Tensor will have one fewer dimension
than the :attr:`x` unless :attr:`keepdim` is true, default
value is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        Tensor: Results of the ``logical and`` on the specified axis of input Tensor `x`, its data type is bool.
Raises:
ValueError: If the data type of `x` is not bool.
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
import numpy as np
# x is a bool Tensor with following elements:
# [[True, False]
# [True, True]]
x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
print(x)
x = paddle.cast(x, 'bool')
# out1 should be [False]
out1 = paddle.all(x) # [False]
print(out1)
# out2 should be [True, False]
out2 = paddle.all(x, axis=0) # [True, False]
print(out2)
# keep_dim=False, out3 should be [False, True], out.shape should be (2,)
out3 = paddle.all(x, axis=-1) # [False, True]
print(out3)
# keep_dim=True, out4 should be [[False], [True]], out.shape should be (2,1)
out4 = paddle.all(x, axis=1, keepdim=True)
out4 = paddle.cast(out4, 'int32') # [[False], [True]]
print(out4)
"""
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if not axis:
reduce_all_flag = True
else:
if len(axis) == len(x.shape):
reduce_all_flag = True
else:
reduce_all_flag = False
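    # [0] below is only a placeholder dim; the kernel ignores it whenever reduce_all_flag is set.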
if in_dygraph_mode():
axis = axis if axis != None and axis != [] else [0]
return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
check_variable_and_dtype(x, 'x', ['bool'], 'all')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
helper = LayerHelper('all', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_all',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
def any(x, axis=None, keepdim=False, name=None):
"""
    Computes the ``logical or`` of tensor elements over the given dimension.
Args:
x (Tensor): An N-D Tensor, the input data type should be `bool`.
        axis (int|list|tuple, optional): The dimensions along which the ``logical or`` is computed. If
            :attr:`None`, the ``logical or`` is computed over all elements of :attr:`x` and a
            Tensor with a single element is returned, otherwise it must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result Tensor will have one fewer dimension
than the :attr:`x` unless :attr:`keepdim` is true, default
value is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        Tensor: Results of the ``logical or`` on the specified axis of input Tensor `x`, its data type is bool.
Raises:
ValueError: If the data type of `x` is not bool.
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
import numpy as np
# x is a bool Tensor with following elements:
# [[True, False]
            #  [True, True]]
x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
print(x)
x = paddle.cast(x, 'bool')
# out1 should be [True]
out1 = paddle.any(x) # [True]
print(out1)
# out2 should be [True, True]
out2 = paddle.any(x, axis=0) # [True, True]
print(out2)
# keep_dim=False, out3 should be [True, True], out.shape should be (2,)
out3 = paddle.any(x, axis=-1) # [True, True]
print(out3)
# keep_dim=True, result should be [[True], [True]], out.shape should be (2,1)
out4 = paddle.any(x, axis=1, keepdim=True)
out4 = paddle.cast(out4, 'int32') # [[True], [True]]
print(out4)
"""
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if not axis:
reduce_all_flag = True
else:
if len(axis) == len(x.shape):
reduce_all_flag = True
else:
reduce_all_flag = False
if in_dygraph_mode():
axis = axis if axis != None and axis != [] else [0]
return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
check_variable_and_dtype(x, 'x', ['bool'], 'any')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
helper = LayerHelper('any', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_any',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
def broadcast_shape(x_shape, y_shape):
"""
    The function returns the result shape of broadcasting two tensors with shapes x_shape and y_shape. Please refer to :ref:`user_guide_broadcasting` for more details.
Args:
x_shape (list[int]|tuple[int]): A shape of tensor.
y_shape (list[int]|tuple[int]): A shape of tensor.
Returns:
list[int], the result shape.
Examples:
.. code-block:: python
import paddle
shape = paddle.broadcast_shape([2, 1, 3], [1, 3, 1])
# [2, 3, 3]
# shape = paddle.broadcast_shape([2, 1, 3], [3, 3, 1])
# ValueError (terminated with error message).
"""
return core.broadcast_shape(x_shape, y_shape)
def conj(x, name=None):
r"""
    This function computes the conjugate of the Tensor element-wise.
Args:
x (Tensor): The input tensor which hold the complex numbers.
Optional data types are: complex64, complex128, float32, float64, int32 or int64.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        out (Tensor): The conjugate of input. The shape and data type are the same as the input.
            If the elements of the tensor are of a real type such as float32, float64, int32 or int64, the output is the same as the input.
Examples:
.. code-block:: python
import paddle
data=paddle.to_tensor([[1+1j, 2+2j, 3+3j], [4+4j, 5+5j, 6+6j]])
#Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
# [[(1+1j), (2+2j), (3+3j)],
# [(4+4j), (5+5j), (6+6j)]])
conj_data=paddle.conj(data)
#Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
# [[(1-1j), (2-2j), (3-3j)],
# [(4-4j), (5-5j), (6-6j)]])
"""
if in_dygraph_mode():
return _C_ops.conj(x)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj')
helper = LayerHelper('conj', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
return out
def digamma(x, name=None):
r"""
Calculates the digamma of the given input tensor, element-wise.
.. math::
Out = \Psi(x) = \frac{ \Gamma^{'}(x) }{ \Gamma(x) }
Args:
x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor, the digamma of the input Tensor, the shape and data type is the same with input.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([[1, 1.5], [0, -2.2]], dtype='float32')
res = paddle.digamma(data)
print(res)
# Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[-0.57721591, 0.03648996],
# [ nan , 5.32286835]])
"""
if in_dygraph_mode():
return _C_ops.digamma(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
helper = LayerHelper('digamma', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
return out
def neg(x, name=None):
"""
    This function computes the negative of the Tensor element-wise.
Args:
x (Tensor): Input of neg operator, an N-D Tensor, with data type float32, float64, int8, int16, int32, or int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): The negative of input Tensor. The shape and data type are the same with input Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.neg(x)
print(out)
# [0.4 0.2 -0.1 -0.3]
"""
return layers.scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)
def atan2(x, y, name=None):
r"""
Element-wise arctangent of x/y with consideration of the quadrant.
Equation:
.. math::
atan2(x,y)=\left\{\begin{matrix}
& tan^{-1}(\frac{x}{y}) & y > 0 \\
& tan^{-1}(\frac{x}{y}) + \pi & x>=0, y < 0 \\
& tan^{-1}(\frac{x}{y}) - \pi & x<0, y < 0 \\
& +\frac{\pi}{2} & x>0, y = 0 \\
& -\frac{\pi}{2} & x<0, y = 0 \\
&\text{undefined} & x=0, y = 0
\end{matrix}\right.
Args:
x (Tensor): An N-D Tensor, the data type is int32, int64, float16, float32, float64.
y (Tensor): An N-D Tensor, must have the same type as `x`.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float64 when the input data type is int).
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-1, +1, +1, -1]).astype('float32')
#Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [-1, 1, 1, -1])
y = paddle.to_tensor([-1, -1, +1, +1]).astype('float32')
#Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [-1, -1, 1, 1])
out = paddle.atan2(x, y)
#Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [-2.35619450, 2.35619450, 0.78539819, -0.78539819])
"""
if in_dygraph_mode():
return _C_ops.atan2(x, y)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
check_variable_and_dtype(y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
helper = LayerHelper('atan2', **locals())
inputs = {'X1' : x, 'X2' : y}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='atan2', inputs=inputs, outputs={'Out': out})
return out
def lerp(x, y, weight, name=None):
r"""
Does a linear interpolation between x and y based on weight.
Equation:
.. math::
lerp(x, y, weight) = x + weight * (y - x).
Args:
x (Tensor): An N-D Tensor, the data type is float32, float64.
y (Tensor): An N-D Tensor, the data type is float32, float64.
weight (float|Tensor): the weight for the interpolation formula.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor, the shape and data type is the same with input.
Example:
.. code-block:: python
import paddle
x = paddle.arange(1., 5., dtype='float32')
y = paddle.empty([4], dtype='float32')
y.fill_(10.)
            out = paddle.lerp(x, y, 0.5)
            # out: [5.5, 6., 6.5, 7.]
"""
if in_dygraph_mode():
check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
if isinstance(weight, float):
weight = paddle.to_tensor(weight, dtype=x.dtype)
return _C_ops.lerp(x, y, weight)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], 'lerp')
helper = LayerHelper('lerp', **locals())
inputs = {'X': x, 'Y': y, 'Weight': weight}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
return out
@inplace_apis_in_dygraph_only
def lerp_(x, y, weight, name=None):
r"""
Inplace version of ``lerp`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_lerp`.
"""
out_shape = broadcast_shape(x.shape, y.shape)
check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
if isinstance(weight, float):
weight = paddle.to_tensor([weight], dtype=x.dtype)
elif isinstance(weight, (paddle.Tensor, Variable)):
out_shape = broadcast_shape(out_shape, weight.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
return _C_ops.lerp_(x, y, weight)
def rad2deg(x, name=None):
"""
Convert each of the elements of input x from angles in radians to degrees.
Equation:
.. math::
rad2deg(x)=180/ \pi * x
Args:
x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float32 when the input data type is int).
Examples:
.. code-block:: python
import paddle
import numpy as np
x1 = paddle.to_tensor([3.142, -3.142, 6.283, -6.283, 1.570, -1.570])
result1 = paddle.rad2deg(x1)
print(result1)
# Tensor(shape=[6], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [180.02334595, -180.02334595, 359.98937988, -359.98937988,
            #         89.95437622 , -89.95437622])
x2 = paddle.to_tensor(np.pi/2)
result2 = paddle.rad2deg(x2)
print(result2)
# Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [90.])
x3 = paddle.to_tensor(1)
result3 = paddle.rad2deg(x3)
print(result3)
# Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [57.29578018])
"""
rad2deg_scale = 180 / np.pi
if in_dygraph_mode():
if convert_dtype(x.dtype) in ['int32', 'int64']:
x = cast(x, dtype="float32")
return _C_ops.scale(x, 'scale', rad2deg_scale)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg')
helper = LayerHelper('rad2deg', **locals())
out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)
helper.append_op(
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op(
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': rad2deg_scale})
return out
def deg2rad(x, name=None):
"""
Convert each of the elements of input x from degrees to angles in radians.
Equation:
.. math::
deg2rad(x)=\pi * x / 180
Args:
x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float32 when the input data type is int).
Examples:
.. code-block:: python
import paddle
import numpy as np
x1 = paddle.to_tensor([180.0, -180.0, 360.0, -360.0, 90.0, -90.0])
result1 = paddle.deg2rad(x1)
print(result1)
# Tensor(shape=[6], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [3.14159274, -3.14159274, 6.28318548, -6.28318548, 1.57079637,
# -1.57079637])
x2 = paddle.to_tensor(180)
result2 = paddle.deg2rad(x2)
print(result2)
# Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [3.14159274])
"""
deg2rad_scale = np.pi / 180.0
if in_dygraph_mode():
if convert_dtype(x.dtype) in ['int32', 'int64']:
x = cast(x, dtype="float32")
return _C_ops.scale(x, 'scale', deg2rad_scale)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad')
helper = LayerHelper('deg2rad', **locals())
out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)
helper.append_op(
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op(
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': deg2rad_scale})
return out
def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
r"""
Computes the n-th forward difference along the given axis.
    The first-order difference is computed using the following formula:
.. math::
out[i] = x[i+1] - x[i]
Higher-order differences are computed by using paddle.diff() recursively.
Only n=1 is currently supported.
Args:
x(Tensor): The input tensor to compute the forward difference on
n(int, optional): The number of times to recursively compute the difference.
Only support n=1. Default:1
axis(int, optional): The axis to compute the difference along. Default:-1
prepend(Tensor, optional): The tensor to prepend to input along axis before computing the difference.
            Its number of dimensions must match that of x,
            and its shape must match x's shape except along axis.
append(Tensor, optional): The tensor to append to input along axis before computing the difference,
            Its number of dimensions must match that of x,
            and its shape must match x's shape except along axis.
name(str|None): A name for this layer(optional). If set None,
the layer will be named automatically.
Returns:
Tensor: The output tensor with same dtype with x.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 4, 5, 2])
out = paddle.diff(x)
print(out)
# out:
# [3, 1, -3]
y = paddle.to_tensor([7, 9])
out = paddle.diff(x, append=y)
print(out)
# out:
# [3, 1, -3, 5, 2]
z = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
out = paddle.diff(z, axis=0)
print(out)
# out:
# [[3, 3, 3]]
out = paddle.diff(z, axis=1)
print(out)
# out:
# [[1, 1], [1, 1]]
"""
if axis < 0:
axis = axis + len(x.shape)
if axis > len(x.shape):
axis = len(x.shape)
if axis < 0:
axis = 0
dtype = x.dtype
axes = [axis]
infer_flags = list(1 for i in range(len(axes)))
if in_dygraph_mode():
has_pend = False
input_list = []
if prepend is not None and append is not None:
input_list = [prepend, x, append]
has_pend = True
elif prepend is not None:
input_list = [prepend, x]
has_pend = True
elif append is not None:
input_list = [x, append]
has_pend = True
if has_pend:
new_input = _C_ops.concat(input_list, 'axis', axis)
else:
new_input = x
attrs_1 = ()
attrs_2 = ()
dim_len = new_input.shape[axis]
starts_1 = [0]
attrs_1 += ('starts', starts_1)
ends_1 = [dim_len - 1]
attrs_1 += ('ends', ends_1)
input_front = _C_ops.slice(new_input, None, None, 'axes', axes, \
'infer_flags', infer_flags, *attrs_1)
starts_2 = [1]
attrs_2 += ('starts', starts_2)
ends_2 = [dim_len]
attrs_2 += ('ends', ends_2)
input_back = _C_ops.slice(new_input, None, None, 'axes', axes, \
'infer_flags', infer_flags, *attrs_2)
if x.dtype == paddle.bool:
op = getattr(_C_ops, "logical_xor")
out = op(input_back, input_front)
else:
out = layers.elementwise_sub(input_back, input_front, axis=axis)
return out
else:
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff')
check_type(axis, 'axis', (int), 'diff')
helper = LayerHelper('diff', **locals())
has_pend = False
input_list = []
if prepend is not None and append is not None:
input_list = [prepend, x, append]
has_pend = True
elif prepend is not None:
input_list = [prepend, x]
has_pend = True
elif append is not None:
input_list = [x, append]
has_pend = True
if has_pend:
new_input = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='concat', inputs={'X': input_list}, outputs={'Out': [new_input]}, attrs={'axis': axis}
)
else:
new_input = x
dim_len = new_input.shape[axis]
attrs_1 = {'axes': axes}
starts_1 = [0]
ends_1 = [dim_len - 1]
attrs_1['starts'] = starts_1
attrs_1['ends'] = ends_1
input_front = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='slice', inputs={'Input': new_input}, attrs=attrs_1, outputs={'Out': input_front}
)
attrs_2 = {'axes': axes}
starts_2 = [1]
ends_2 = [dim_len]
attrs_2['starts'] = starts_2
attrs_2['ends'] = ends_2
input_back = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='slice', inputs={'Input': new_input}, attrs=attrs_2, outputs={'Out': input_back}
)
if dtype == paddle.bool:
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='logical_xor', inputs={"X": input_back, "Y": input_front}, outputs={"Out": out}
)
else:
out = layers.elementwise_sub(input_back, input_front, axis=axis)
return out
def angle(x, name=None):
r"""
Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while
for negative real numbers, the angle is :math:`\pi`.
Equation:
.. math::
angle(x)=arctan2(x.imag, x.real)
Args:
x (Tensor): An N-D Tensor, the data type is complex64, complex128, or float32, float64 .
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        out (Tensor): An N-D Tensor of real data type with the same precision as that of x's data type.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32')
y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32')
z = x + 1j * y
print(z.numpy())
# [[-2.-2.j -2.-1.j -2.+0.j -2.+1.j]
# [-1.-2.j -1.-1.j -1.+0.j -1.+1.j]
# [ 0.-2.j 0.-1.j 0.+0.j 0.+1.j]
# [ 1.-2.j 1.-1.j 1.+0.j 1.+1.j]]
theta = paddle.angle(z)
print(theta.numpy())
# [[-2.3561945 -2.6779451 3.1415927 2.6779451]
# [-2.0344439 -2.3561945 3.1415927 2.3561945]
# [-1.5707964 -1.5707964 0. 1.5707964]
# [-1.1071488 -0.7853982 0. 0.7853982]]
"""
if in_dygraph_mode():
return _C_ops.angle(x)
check_variable_and_dtype(x, 'x',
['float32', 'float64', 'complex64', 'complex128'], 'angle')
op_type = "angle"
helper = LayerHelper(op_type, **locals())
inputs = {"X": x}
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(x.dtype))
outputs = {"Out": out}
helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
return out
return _elementwise_op(LayerHelper(op_type, **locals()))
def maximum(x, y, name=None):
op_type = 'elementwise_max'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def minimum(x, y, name=None):
op_type = 'elementwise_min'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
for func in [
add,
multiply
]:
proto_dict = {'add': 'elementwise_add', 'multiply': 'elementwise_mul'}
op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__])
additional_args_lines = [
"name (string, optional): Name of the output. \
Default is None. It's used to print debug info for developers. Details: \
:ref:`api_guide_Name` "
]
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=additional_args_lines,
skip_attrs_set={"x_data_format", "y_data_format", "axis",
"use_quantizer", "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out"
}) + """\n""" + str(func.__doc__)
def sum(x, axis=None, dtype=None, keepdim=False, name=None):
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if not axis:
reduce_all_flag = True
else:
if len(axis) == len(x.shape):
reduce_all_flag = True
else:
reduce_all_flag = False
def get_dtype(x, dtype):
if dtype is not None:
return (True, dtype)
src_type = convert_dtype(x.dtype)
if src_type in ['bool','int32', 'int64']:
return (True, 'int64')
return (False, src_type)
dtype_flag, dtype = get_dtype(x, dtype)
if in_dygraph_mode():
axis = axis if axis != None and axis != [] else [0]
if dtype_flag:
return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag, 'in_dtype',
x.dtype, 'out_dtype',
convert_np_dtype_to_dtype_(dtype))
else:
return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
if dtype_flag:
attrs.update({
'in_dtype': x.dtype,
'out_dtype': convert_np_dtype_to_dtype_(dtype)
})
check_variable_and_dtype(
x, 'x', ['bool', 'float16', 'float32', 'float64',
'int32', 'int64', 'complex64', 'complex128',
u'bool', u'float16', u'float32', u'float64',
u'int32', u'int64', u'complex64', u'complex128'], 'sum')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'sum')
helper = LayerHelper('sum', **locals())
if dtype_flag:
out = helper.create_variable_for_type_inference(
dtype=convert_np_dtype_to_dtype_(dtype))
else:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_sum',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
@templatedoc(op_type="sum")
def add_n(inputs, name=None):
if in_dygraph_mode():
if isinstance(inputs, Variable):
inputs = [inputs]
return _C_ops.sum(inputs, 'use_mkldnn', False)
helper = LayerHelper('add_n', **locals())
check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
if isinstance(inputs, list) or isinstance(inputs, tuple):
if len(inputs) > 0:
for input in inputs:
check_variable_and_dtype(input, "inputs", \
['float32', 'float64', 'int32', 'int64'], 'add_n')
else:
check_variable_and_dtype(inputs, "inputs", \
['float32', 'float64', 'int32', 'int64'], 'add_n')
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('inputs'))
helper.append_op(
type='sum',
inputs={'X': inputs},
outputs={'Out': out},
attrs={'use_mkldnn': False})
return out
def trunc(input, name=None):
if in_dygraph_mode():
return _C_ops.trunc(input)
else:
inputs = {"X": input}
attrs = {}
helper = LayerHelper("trunc", **locals())
check_variable_and_dtype(input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc')
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out})
return out
def mm(input, mat2, name=None):
if in_dygraph_mode():
return _C_ops.matmul_v2(input, mat2)
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(val, name,
['float16', 'float32', 'float64'], 'mm')
x_shape = list(x.shape)
y_shape = list(y.shape)
if len(x_shape) == 1:
x_shape = [1] + x_shape
if len(y_shape) == 1:
y_shape = y_shape + [1]
# check the inner 2 dimensions
if x_shape[-1] != y_shape[-2]:
if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
raise ValueError(
"After performing an optional transpose, Input X's width should be "
"equal to Y's width for multiplication "
"prerequisites. But received X's shape: %s, Y's shape: %s\n"
% (x_shape, y_shape))
if len(y_shape) > 2 and len(x_shape) > 2:
for i, dim_x in enumerate(x_shape[:-2]):
# don't check neg shape
if dim_x < 0 or y_shape[i] < 0:
continue
if dim_x != y_shape[i]:
raise ValueError(
"When the matrix is larger than 2 dimensions, the higher "
"dimensional values of the two matrices need to be equal. "
"But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
"Y's shape: %s.\n" % (i, i, x_shape, y_shape))
__check_input(input, mat2)
helper = LayerHelper('mm', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='matmul_v2', inputs={'X': input,
'Y': mat2}, outputs={'Out': out})
return out
def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
input_shape = input.shape
x_shape = x.shape
y_shape = y.shape
if not len(input_shape) == len(x_shape) == len(y_shape) == 2:
raise ValueError("The dimention of input, x, y should be 2 but receive input's shape: {}, x's shape: {}, y's shape: {}".format(input_shape, x_shape, y_shape))
if input_shape[0] != x_shape[0]:
if input_shape[0] != 1:
raise ValueError( "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
if input_shape[1] != y_shape[1] and input_shape[1] != 1:
raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
if input_shape[1] != y_shape[1]:
if input_shape[1] != 1:
raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
if input_shape[0] != x_shape[0] and input_shape[0] != 1:
raise ValueError( "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
if x_shape[1] != y_shape[0]:
raise ValueError("The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}.".format(x_shape, y_shape))
if in_dygraph_mode():
out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
return out
inputs = {'Input': input, "X": x, "Y": y}
attrs = {'Alpha': alpha, 'Beta': beta}
helper = LayerHelper("addmm", **locals())
check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out})
return out
def logsumexp(x, axis=None, keepdim=False, name=None):
if isinstance(axis, int):
axis = [axis]
reduce_all = True if axis is None \
or len(axis)==0 \
or len(axis) == len(x.shape) else False
if axis is None or len(axis) == 0:
axis = [0]
if in_dygraph_mode():
return _C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
check_variable_and_dtype(x, 'x',
['float32', 'float64'],
'logsumexp')
helper = LayerHelper('logsumexp', **locals())
attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all':reduce_all}
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
return out
def inverse(x, name=None):
if in_dygraph_mode():
return _C_ops.inverse(x)
def _check_input(x):
check_variable_and_dtype(x, 'x',
['float32', 'float64'], 'inverse')
if len(x.shape) < 2:
raise ValueError(
"The input of inverse is expected to be a Tensor whose number "
"of dimensions is no less than 2. But reviced: %d, "
"x's shape: %s." % (len(x.shape), x.shape))
_check_input(x)
helper = LayerHelper('inverse', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]})
return out
def max(x, axis=None, keepdim=False, name=None):
if axis is not None and not isinstance(axis, list):
if isinstance(axis, tuple):
axis = list(axis)
elif isinstance(axis, int):
axis= [axis]
else:
raise TypeError(
"The type of axis must be int, list or tuple, but received {}".format(type(axis)))
reduce_all = True if axis == None or axis == [] else False
axis = axis if axis != None and axis != [] else [0]
if in_dygraph_mode():
return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('max', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max')
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_max',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def min(x, axis=None, keepdim=False, name=None):
if axis is not None and not isinstance(axis, list):
if isinstance(axis, tuple):
axis = list(axis)
elif isinstance(axis, int):
axis= [axis]
else:
raise TypeError(
"The type of axis must be int, list or tuple, but received {}".format(type(axis)))
reduce_all = True if axis == None or axis == [] else False
axis = axis if axis != None and axis != [] else [0]
if in_dygraph_mode():
return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('min', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min')
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_min',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def log1p(x, name=None):
if in_dygraph_mode():
return _C_ops.log1p(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
inputs = {'X': [x]}
helper = LayerHelper('log1p', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
return out
def log2(x, name=None):
if in_dygraph_mode():
return _C_ops.log2(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
inputs = {'X': [x]}
helper = LayerHelper('log2', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log2", inputs={"X": x}, outputs={"Out": out})
return out
def log10(x, name=None):
if in_dygraph_mode():
return _C_ops.log10(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
inputs = {'X': [x]}
helper = LayerHelper('log10', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log10", inputs={"X": x}, outputs={"Out": out})
return out
def clip(x, min=None, max=None, name=None):
x_dtype = str(x.dtype)
if x_dtype == 'paddle.int32':
min_ = np.iinfo(np.int32).min
max_ = np.iinfo(np.int32).max - 2**7
elif x_dtype == 'paddle.int64':
min_ = np.iinfo(np.int64).min
max_ = np.iinfo(np.int64).max - 2**39
else:
min_ = float(np.finfo(np.float32).min)
max_ = float(np.finfo(np.float32).max)
if in_dygraph_mode():
if isinstance(min, Variable):
min = min.numpy().item(0)
if isinstance(max, Variable):
max = max.numpy().item(0)
min = min_ if min is None else min
max = max_ if max is None else max
return _C_ops.clip(x, "min", min, "max", max)
if min is not None:
check_type(min, 'min', (float, int, Variable), 'clip')
if isinstance(min, Variable):
check_dtype(min.dtype, 'min', ['float32', 'float64', 'int32'],
'clip', '(When the type of min in clip is Variable.)')
if max is not None:
check_type(max, 'max', (float, int, Variable), 'clip')
if isinstance(max, Variable):
check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],
'clip', '(When the type of max in clip is Variable.)')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip')
inputs = {'X': x}
attrs = {'min': min_, 'max': max_}
if isinstance(min, Variable):
min.stop_gradient = True
inputs['Min'] = min
elif min is not None:
attrs['min'] = min
if isinstance(max, Variable):
max.stop_gradient = True
inputs['Max'] = max
elif max is not None:
attrs['max'] = max
helper = LayerHelper('clip', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('x'))
helper.append_op(
type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs)
return output
@inplace_apis_in_dygraph_only
def clip_(x, min=None, max=None, name=None):
fmin = float(np.finfo(np.float32).min)
fmax = float(np.finfo(np.float32).max)
if isinstance(min, Variable):
min = min.numpy().item(0)
if isinstance(max, Variable):
max = max.numpy().item(0)
min = fmin if min is None else min
max = fmax if max is None else max
return _C_ops.clip_(x, "min", min, "max", max)
def trace(x, offset=0, axis1=0, axis2=1, name=None):
def __check_input(input, offset, dim1, dim2):
check_dtype(x.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
'trace')
input_shape = list(x.shape)
assert len(input_shape) >= 2, \
"The x must be at least 2-dimensional, " \
"But received Input x's dimensional: %s.\n" % \
len(input_shape)
axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2
assert ((0 <= axis1_) and (axis1_ < len(input_shape))), \
"The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis1)
assert ((0 <= axis2_) and (axis2_ < len(input_shape))), \
"The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis2)
assert axis1_ != axis2_, \
"axis1 and axis2 cannot be the same axis." \
"But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
    __check_input(x, offset, axis1, axis2)
if in_dygraph_mode():
return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
inputs = {'Input': [x]}
attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}
helper = LayerHelper('trace', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='trace',
inputs={'Input': [x]},
attrs={'offset': offset,
'axis1': axis1,
'axis2': axis2},
outputs={'Out': [out]})
return out
def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
if in_dygraph_mode():
return _C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
def __check_input(input, offset, dim1, dim2):
check_dtype(x.dtype, 'Input',
['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],
'diagonal')
input_shape = list(x.shape)
assert len(input_shape) >= 2, \
"The x must be at least 2-dimensional, " \
"But received Input x's dimensional: %s.\n" % \
len(input_shape)
axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2
assert axis1_ < len(input_shape), \
"The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis1)
assert axis2_ < len(input_shape), \
"The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis2)
assert axis1_ != axis2_, \
"axis1 and axis2 cannot be the same axis." \
"But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
    __check_input(x, offset, axis1, axis2)
helper = LayerHelper('diagonal', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='diagonal',
inputs={'Input': [x]},
attrs={'offset': offset,
'axis1': axis1,
'axis2': axis2},
outputs={'Out': [out]})
return out
@templatedoc(op_type="kron")
def kron(x, y, name=None):
if in_dygraph_mode():
return _C_ops.kron(x, y)
helper = LayerHelper('kron', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
return out
def cumsum(x, axis=None, dtype=None, name=None):
if axis is None:
flatten = True
else:
flatten = False
if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)
if in_dygraph_mode():
if axis is None:
return _C_ops.cumsum(x, 'flatten', flatten)
else:
return _C_ops.cumsum(x, 'axis', axis, 'flatten', flatten)
check_type(x, 'x', (Variable), 'cumsum')
locals_var = locals().copy()
kwargs = dict()
for name, val in locals_var.items():
if val is not None:
kwargs[name] = val
_cum_sum_ = generate_layer_fn('cumsum')
return _cum_sum_(**kwargs)
def cumprod(x, dim=None, dtype=None, name=None):
if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)
if in_dygraph_mode():
return _C_ops.cumprod(x, 'dim', dim)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod')
check_type(dim, 'dim', int, 'cumprod')
helper = LayerHelper('cumprod', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='cumprod', inputs={'X': x}, outputs={'Out': out}, attrs={'dim': dim})
return out
def isfinite(x, name=None):
if in_dygraph_mode():
return _C_ops.isfinite_v2(x)
helper = LayerHelper("isfinite_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')
out = helper.create_variable_for_type_inference('bool')
helper.append_op(type="isfinite_v2", inputs={"X": x}, outputs={"Out": out})
return out
def isinf(x, name=None):
if in_dygraph_mode():
return _C_ops.isinf_v2(x)
helper = LayerHelper("isinf_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
return out
def isnan(x, name=None):
if in_dygraph_mode():
return _C_ops.isnan_v2(x)
helper = LayerHelper("isnan_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
return out
def prod(x, axis=None, keepdim=False, dtype=None, name=None):
if dtype is not None:
check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod')
if x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)
return layers.reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)
def sign(x, name=None):
if in_dygraph_mode():
return _C_ops.sign(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
helper = LayerHelper("sign", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
return out
def tanh(x, name=None):
if in_dygraph_mode():
return _C_ops.tanh(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
check_type(x, 'x', (Variable), 'tanh')
helper = LayerHelper('tanh', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
return out
@inplace_apis_in_dygraph_only
def tanh_(x, name=None):
return _C_ops.tanh_(x)
def increment(x, value=1.0, name=None):
if in_dygraph_mode():
return _C_ops.increment(x, 'step', value)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'increment')
helper = LayerHelper("increment", **locals())
helper.append_op(
type='increment',
inputs={'X': [x]},
outputs={'Out': [x]},
attrs={'step': float(value)})
return x
def all(x, axis=None, keepdim=False, name=None):
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if not axis:
reduce_all_flag = True
else:
if len(axis) == len(x.shape):
reduce_all_flag = True
else:
reduce_all_flag = False
if in_dygraph_mode():
axis = axis if axis != None and axis != [] else [0]
return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
check_variable_and_dtype(x, 'x', ['bool'], 'all')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
helper = LayerHelper('all', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_all',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
def any(x, axis=None, keepdim=False, name=None):
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if not axis:
reduce_all_flag = True
else:
if len(axis) == len(x.shape):
reduce_all_flag = True
else:
reduce_all_flag = False
if in_dygraph_mode():
axis = axis if axis != None and axis != [] else [0]
return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
check_variable_and_dtype(x, 'x', ['bool'], 'any')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
helper = LayerHelper('any', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_any',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
def broadcast_shape(x_shape, y_shape):
return core.broadcast_shape(x_shape, y_shape)
def conj(x, name=None):
if in_dygraph_mode():
return _C_ops.conj(x)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj')
helper = LayerHelper('conj', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
return out
def digamma(x, name=None):
if in_dygraph_mode():
return _C_ops.digamma(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
helper = LayerHelper('digamma', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
return out
def neg(x, name=None):
return layers.scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)
def atan2(x, y, name=None):
if in_dygraph_mode():
return _C_ops.atan2(x, y)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
check_variable_and_dtype(y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
helper = LayerHelper('atan2', **locals())
inputs = {'X1' : x, 'X2' : y}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='atan2', inputs=inputs, outputs={'Out': out})
return out
def lerp(x, y, weight, name=None):
if in_dygraph_mode():
check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
if isinstance(weight, float):
weight = paddle.to_tensor(weight, dtype=x.dtype)
return _C_ops.lerp(x, y, weight)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], 'lerp')
helper = LayerHelper('lerp', **locals())
inputs = {'X': x, 'Y': y, 'Weight': weight}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
return out
@inplace_apis_in_dygraph_only
def lerp_(x, y, weight, name=None):
out_shape = broadcast_shape(x.shape, y.shape)
check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
if isinstance(weight, float):
weight = paddle.to_tensor([weight], dtype=x.dtype)
elif isinstance(weight, (paddle.Tensor, Variable)):
out_shape = broadcast_shape(out_shape, weight.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
return _C_ops.lerp_(x, y, weight)
def rad2deg(x, name=None):
rad2deg_scale = 180 / np.pi
if in_dygraph_mode():
if convert_dtype(x.dtype) in ['int32', 'int64']:
x = cast(x, dtype="float32")
return _C_ops.scale(x, 'scale', rad2deg_scale)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg')
helper = LayerHelper('rad2deg', **locals())
out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)
helper.append_op(
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op(
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': rad2deg_scale})
return out
def deg2rad(x, name=None):
deg2rad_scale = np.pi / 180.0
if in_dygraph_mode():
if convert_dtype(x.dtype) in ['int32', 'int64']:
x = cast(x, dtype="float32")
return _C_ops.scale(x, 'scale', deg2rad_scale)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad')
helper = LayerHelper('deg2rad', **locals())
out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)
helper.append_op(
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op(
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': deg2rad_scale})
return out
def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
if axis < 0:
axis = axis + len(x.shape)
if axis > len(x.shape):
axis = len(x.shape)
if axis < 0:
axis = 0
dtype = x.dtype
axes = [axis]
infer_flags = list(1 for i in range(len(axes)))
if in_dygraph_mode():
has_pend = False
input_list = []
if prepend is not None and append is not None:
input_list = [prepend, x, append]
has_pend = True
elif prepend is not None:
input_list = [prepend, x]
has_pend = True
elif append is not None:
input_list = [x, append]
has_pend = True
if has_pend:
new_input = _C_ops.concat(input_list, 'axis', axis)
else:
new_input = x
attrs_1 = ()
attrs_2 = ()
dim_len = new_input.shape[axis]
starts_1 = [0]
attrs_1 += ('starts', starts_1)
ends_1 = [dim_len - 1]
attrs_1 += ('ends', ends_1)
input_front = _C_ops.slice(new_input, None, None, 'axes', axes, \
'infer_flags', infer_flags, *attrs_1)
starts_2 = [1]
attrs_2 += ('starts', starts_2)
ends_2 = [dim_len]
attrs_2 += ('ends', ends_2)
input_back = _C_ops.slice(new_input, None, None, 'axes', axes, \
'infer_flags', infer_flags, *attrs_2)
if x.dtype == paddle.bool:
op = getattr(_C_ops, "logical_xor")
out = op(input_back, input_front)
else:
out = layers.elementwise_sub(input_back, input_front, axis=axis)
return out
else:
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff')
check_type(axis, 'axis', (int), 'diff')
helper = LayerHelper('diff', **locals())
has_pend = False
input_list = []
if prepend is not None and append is not None:
input_list = [prepend, x, append]
has_pend = True
elif prepend is not None:
input_list = [prepend, x]
has_pend = True
elif append is not None:
input_list = [x, append]
has_pend = True
if has_pend:
new_input = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='concat', inputs={'X': input_list}, outputs={'Out': [new_input]}, attrs={'axis': axis}
)
else:
new_input = x
dim_len = new_input.shape[axis]
attrs_1 = {'axes': axes}
starts_1 = [0]
ends_1 = [dim_len - 1]
attrs_1['starts'] = starts_1
attrs_1['ends'] = ends_1
input_front = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='slice', inputs={'Input': new_input}, attrs=attrs_1, outputs={'Out': input_front}
)
attrs_2 = {'axes': axes}
starts_2 = [1]
ends_2 = [dim_len]
attrs_2['starts'] = starts_2
attrs_2['ends'] = ends_2
input_back = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='slice', inputs={'Input': new_input}, attrs=attrs_2, outputs={'Out': input_back}
)
if dtype == paddle.bool:
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='logical_xor', inputs={"X": input_back, "Y": input_front}, outputs={"Out": out}
)
else:
out = layers.elementwise_sub(input_back, input_front, axis=axis)
return out
def angle(x, name=None):
if in_dygraph_mode():
return _C_ops.angle(x)
check_variable_and_dtype(x, 'x',
['float32', 'float64', 'complex64', 'complex128'], 'angle')
op_type = "angle"
helper = LayerHelper(op_type, **locals())
inputs = {"X": x}
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(x.dtype))
outputs = {"Out": out}
helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
return out
| true
| true
|
f7055160e64b5e5b19261e32e1da4a104c9c3cab
| 3,748
|
py
|
Python
|
airtest/core/android/javacap.py
|
koyoki/Airtest
|
ea8391bd4819d9231e7b35f18c14662e6109fad0
|
[
"Apache-2.0"
] | 2
|
2019-12-10T02:36:49.000Z
|
2019-12-19T08:54:40.000Z
|
airtest/core/android/javacap.py
|
koyoki/Airtest
|
ea8391bd4819d9231e7b35f18c14662e6109fad0
|
[
"Apache-2.0"
] | 1
|
2021-10-12T22:51:23.000Z
|
2021-10-12T22:51:23.000Z
|
airtest/core/android/javacap.py
|
koyoki/Airtest
|
ea8391bd4819d9231e7b35f18c14662e6109fad0
|
[
"Apache-2.0"
] | 1
|
2020-12-07T03:40:41.000Z
|
2020-12-07T03:40:41.000Z
|
# -*- coding: utf-8 -*-
from airtest.utils.logger import get_logger
from airtest.utils.safesocket import SafeSocket
from airtest.utils.nbsp import NonBlockingStreamReader
from airtest.utils.snippet import on_method_ready, reg_cleanup
from airtest.core.android.yosemite import Yosemite
import struct
LOGGING = get_logger(__name__)
class Javacap(Yosemite):
"""
    This is another screencap class. It is slower than minicap, but it provides better compatibility. A brief usage sketch follows at the end of this module.
"""
APP_PKG = "com.netease.nie.yosemite"
SCREENCAP_SERVICE = "com.netease.nie.yosemite.Capture"
RECVTIMEOUT = None
def __init__(self, adb):
super(Javacap, self).__init__(adb)
self.frame_gen = None
@on_method_ready('install_or_upgrade')
def _setup_stream_server(self):
"""
Setup stream server
Returns:
adb shell process, non-blocking stream reader and local port
"""
localport, deviceport = self.adb.setup_forward("localabstract:javacap_{}".format)
deviceport = deviceport[len("localabstract:"):]
# setup agent proc
apkpath = self.adb.path_app(self.APP_PKG)
cmds = ["CLASSPATH=" + apkpath, 'exec', 'app_process', '/system/bin', self.SCREENCAP_SERVICE,
"--scale", "100", "--socket", "%s" % deviceport, "-lazy", "2>&1"]
proc = self.adb.start_shell(cmds)
# check proc output
        nbsp = NonBlockingStreamReader(proc.stdout, print_output=True, name="javacap_server")
while True:
line = nbsp.readline(timeout=5.0)
if line is None:
raise RuntimeError("javacap server setup timeout")
if b"Capture server listening on" in line:
break
if b"Address already in use" in line:
raise RuntimeError("javacap server setup error: %s" % line)
reg_cleanup(proc.kill)
return proc, nbsp, localport
def get_frames(self):
"""
Get the screen frames
Returns:
None
"""
proc, nbsp, localport = self._setup_stream_server()
s = SafeSocket()
s.connect((self.adb.host, localport))
t = s.recv(24)
# javacap header
LOGGING.debug(struct.unpack("<2B5I2B", t))
stopping = False
while not stopping:
s.send(b"1")
# recv frame header, count frame_size
if self.RECVTIMEOUT is not None:
header = s.recv_with_timeout(4, self.RECVTIMEOUT)
else:
header = s.recv(4)
if header is None:
LOGGING.error("javacap header is None")
# recv timeout, if not frame updated, maybe screen locked
stopping = yield None
else:
frame_size = struct.unpack("<I", header)[0]
frame_data = s.recv(frame_size)
stopping = yield frame_data
LOGGING.debug("javacap stream ends")
s.close()
nbsp.kill()
proc.kill()
self.adb.remove_forward("tcp:%s" % localport)
def get_frame_from_stream(self):
"""
Get frame from the stream
Returns:
frame
"""
if self.frame_gen is None:
self.frame_gen = self.get_frames()
return self.frame_gen.send(None)
def teardown_stream(self):
"""
End stream
Returns:
None
"""
if not self.frame_gen:
return
try:
self.frame_gen.send(1)
except (TypeError, StopIteration):
pass
else:
LOGGING.warn("%s tear down failed" % self.frame_gen)
self.frame_gen = None
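# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the class above). It
# assumes an adb wrapper such as the one exposed by
# airtest.core.android.android.Android; adjust the import for your setup.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from airtest.core.android.android import Android

    device = Android()                     # connect to the default adb device
    cap = Javacap(device.adb)              # reuse the device's adb wrapper
    frame = cap.get_frame_from_stream()    # bytes of one captured screen frame
    print("received %d bytes" % len(frame))
    cap.teardown_stream()                  # shut the Capture stream down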
| 31.233333
| 119
| 0.583511
|
from airtest.utils.logger import get_logger
from airtest.utils.safesocket import SafeSocket
from airtest.utils.nbsp import NonBlockingStreamReader
from airtest.utils.snippet import on_method_ready, reg_cleanup
from airtest.core.android.yosemite import Yosemite
import struct
LOGGING = get_logger(__name__)
class Javacap(Yosemite):
APP_PKG = "com.netease.nie.yosemite"
SCREENCAP_SERVICE = "com.netease.nie.yosemite.Capture"
RECVTIMEOUT = None
def __init__(self, adb):
super(Javacap, self).__init__(adb)
self.frame_gen = None
@on_method_ready('install_or_upgrade')
def _setup_stream_server(self):
localport, deviceport = self.adb.setup_forward("localabstract:javacap_{}".format)
deviceport = deviceport[len("localabstract:"):]
apkpath = self.adb.path_app(self.APP_PKG)
cmds = ["CLASSPATH=" + apkpath, 'exec', 'app_process', '/system/bin', self.SCREENCAP_SERVICE,
"--scale", "100", "--socket", "%s" % deviceport, "-lazy", "2>&1"]
proc = self.adb.start_shell(cmds)
        nbsp = NonBlockingStreamReader(proc.stdout, print_output=True, name="javacap_server")
while True:
line = nbsp.readline(timeout=5.0)
if line is None:
raise RuntimeError("javacap server setup timeout")
if b"Capture server listening on" in line:
break
if b"Address already in use" in line:
raise RuntimeError("javacap server setup error: %s" % line)
reg_cleanup(proc.kill)
return proc, nbsp, localport
def get_frames(self):
proc, nbsp, localport = self._setup_stream_server()
s = SafeSocket()
s.connect((self.adb.host, localport))
t = s.recv(24)
LOGGING.debug(struct.unpack("<2B5I2B", t))
stopping = False
while not stopping:
s.send(b"1")
if self.RECVTIMEOUT is not None:
header = s.recv_with_timeout(4, self.RECVTIMEOUT)
else:
header = s.recv(4)
if header is None:
LOGGING.error("javacap header is None")
stopping = yield None
else:
frame_size = struct.unpack("<I", header)[0]
frame_data = s.recv(frame_size)
stopping = yield frame_data
LOGGING.debug("javacap stream ends")
s.close()
nbsp.kill()
proc.kill()
self.adb.remove_forward("tcp:%s" % localport)
def get_frame_from_stream(self):
if self.frame_gen is None:
self.frame_gen = self.get_frames()
return self.frame_gen.send(None)
def teardown_stream(self):
if not self.frame_gen:
return
try:
self.frame_gen.send(1)
except (TypeError, StopIteration):
pass
else:
LOGGING.warn("%s tear down failed" % self.frame_gen)
self.frame_gen = None
| true
| true
|
f705525314573327c75262120641f8403420919a
| 8,991
|
py
|
Python
|
447.number-of-boomerangs.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
447.number-of-boomerangs.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
447.number-of-boomerangs.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=447 lang=python
#
# [447] Number of Boomerangs
#
# https://leetcode.com/problems/number-of-boomerangs/description/
#
# algorithms
# Easy (49.20%)
# Likes: 296
# Dislikes: 447
# Total Accepted: 54.7K
# Total Submissions: 109.6K
# Testcase Example: '[[0,0],[1,0],[2,0]]'
#
# Given n points in the plane that are all pairwise distinct, a "boomerang" is
# a tuple of points (i, j, k) such that the distance between i and j equals the
# distance between i and k (the order of the tuple matters).
#
# Find the number of boomerangs. You may assume that n will be at most 500 and
# coordinates of points are all in the range [-10000, 10000] (inclusive).
#
# Example:
#
#
# Input:
# [[0,0],[1,0],[2,0]]
#
# Output:
# 2
#
# Explanation:
# The two boomerangs are [[1,0],[0,0],[2,0]] and [[1,0],[2,0],[0,0]]
#
#
#
#
#
import math
class Solution(object):
def _numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
# Time Limit
result = []
distance = [[0] * len(points) for _ in range(len(points))]
for i in range(len(points)):
for j in range(i):
distance[i][j] = (points[i][0]-points[j][0])**2 + (points[i][1]-points[j][1])**2
# distance[i][j] = math.sqrt((points[i][0]-points[j][0])**2
# + (points[i][1]-points[j][1])**2)
distance[j][i] = distance[i][j]
for m in range(i):
if distance[i][j] == distance[i-1-m][j]:
result.append([points[i], points[j], points[i-1-m]])
result.append([points[i-1-m], points[j], points[i]])
for m in range(j):
if distance[i][j] == distance[i][j-1-m]:
result.append([points[j], points[i], points[j-1-m]])
result.append([points[j-1-m], points[i], points[j]])
return len(result)
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
        count = 0
        data = {}
        for i in range(len(points)):
            for j in range(i):
                distance = (points[i][0]-points[j][0])**2 + (points[i][1]-points[j][1])**2
                exts = data.get(distance)
                if not exts:
                    data[distance] = [[i,j]]
                else:
                    for ext in exts:
                        if ext[0] == i or ext[0] == j or ext[1] == i or ext[1] == j:
                            count += 2
                    data[distance].append([i,j])
        return count
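    # ------------------------------------------------------------------
    # Worked check of the example above: [1,0] lies at squared distance 1
    # from both [0,0] and [2,0], so it anchors k = 2 equidistant points and
    # contributes k * (k - 1) = 2 ordered boomerangs, while the other two
    # points anchor none, so the expected answer is 2.
    #
    # The helper below is a hypothetical alternative sketch of that same
    # counting idea (one hash map of squared distances per anchor point);
    # it is an illustration, not the submission above.
    # ------------------------------------------------------------------
    def numberOfBoomerangsByCounter(self, points):
        from collections import Counter
        total = 0
        for px, py in points:
            freq = Counter((px - qx) ** 2 + (py - qy) ** 2 for qx, qy in points)
            total += sum(k * (k - 1) for k in freq.values())
        return total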
# if __name__ == '__main__':
# s = Solution()
# print s.numberOfBoomerangs([[0, 0], [1, 0], [2, 0]])
# print s.numberOfBoomerangs([[3327,-549],[9196,-8118],[7610,-9506],[5098,8392],[8582,7953],[1053,5802],[3847,2652],[7654,8355],[1614,-9409],[9986,5538],[4660,2944],[4528,-9512],[7483,-1455],[3422,-3966],[2037,-4456],[5107,-4635],[4996,655],[7247,2606],[1149,8697],[7350,6083],[3002,8403],[8238,6850],[1055,5892],[5205,9021],[2835,5191],[911,-2505],[4488,-4561],[7983,-1677],[336,-2243],[4358,-1274],[3302,9465],[4091,-5350],[120,7690],[3608,7622],[6388,-9042],[57,-610],[9361,8295],[6240,-3232],[540,7797],[2141,-6625],[9341,3053],[7223,3829],[4844,1558],[2152,-8467],[9316,6510],[259,-1030],[2327,-5650],[9972,8800],[2040,-6420],[2774,4780],[4538,-7169],[4171,-6101],[7479,-3237],[7019,-1981],[4561,-4488],[7746,254],[4917,4969],[4083,-238],[6528,-7413],[1295,-7804],[5450,-8446],[1166,-5871],[2256,-8862],[2929,-5704],[4718,2055],[5429,-4392],[4887,9600],[9507,-1282],[2715,2878],[6737,-6372],[8390,-9165],[3882,3308],[5805,4317],[9422,8685],[3257,-2931],[881,-1293],[8623,-1601],[2836,879],[5889,2118],[1527,607],[4173,-3044],[6215,5412],[2908,-7926],[4130,-8024],[1304,7219],[1956,-3954],[8055,5839],[5706,212],[6508,5128],[8897,9765],[2197,-3870],[8472,-2828],[4529,7661],[4403,-9582],[6131,-7717],[7377,-3344],[5591,9944],[2069,-5148],[8370,-7449],[6828,-3974],[6123,-1216],[2072,530],[975,-2221],[7094,-2516],[9259,-4009],[7249,7809],[8473,2074],[4981,-6998],[9735,5737],[9772,5866],[8020,-6499],[8874,-6389],[3445,-9057],[4815,8167],[9847,1643],[4193,2322],[6780,2617],[9204,4107],[396,6298],[1591,6008],[2289,-4807],[3817,762],[7267,5150],[116,-6646],[887,-3760],[5572,-4741],[9741,4446],[5223,-462],[1742,38],[7705,1589],[1682,-1750],[263,4814],[867,9467],[8921,7616],[5765,-3135],[3624,4406],[2058,-2559],[1520,-675],[2591,-2012],[2679,-169],[4228,-1749],[5090,-6031],[2697,-9687],[9859,791],[352,3916],[8732,-1614],[2166,8995],[3200,9385],[4814,-1527],[7001,579],[5338,-3023],[1337,-2604],[4418,-7143],[3073,3362],[845,-7896],[3193,-8575],[6707,4635],[1746,-595],[4949,1605],[6548,-8347],[1873,5281],[39,-5961],[4276,-409],[9777,-909],[8064,3130],[6022,-245],[108,7360],[7151,4526],[6569,-3423],[4240,-2585],[8681,-2567],[5192,5389],[2069,-3061],[1146,3370],[4896,7694],[5023,6770],[2975,-8586],[7161,-6396],[1005,6938],[2695,-4579],[69,-4931],[5176,177],[2429,-1320],[1055,8999],[5257,-4704],[2766,-6062],[9081,-2042],[5679,-2498],[1249,6825],[7224,-3854],[872,2247],[2916,-6153],[3661,-9923],[7451,-8982],[7016,6498],[6440,-6563],[1568,-8384],[9966,-9651],[296,1021],[9348,-8095],[2669,8466],[2196,-8249],[2777,7875],[5605,4026],[1053,-7170],[172,-8075],[1429,-6912],[5772,-8557],[9518,-424],[2461,2886],[2426,-1099],[6323,-6006],[6870,-3711],[696,3518],[3662,6396],[5424,-3668],[4863,7620],[4435,7640],[1847,-3608],[8018,-7100],[9222,-5457],[4825,7004],[3983,-3050],[8447,-6499],[2878,-9092],[6387,5304],[6162,-938],[5651,3032],[5351,6347],[2902,-4634],[2743,8326],[8050,-6042],[2298,-1163],[7950,-9502],[5229,-4031],[3398,-9196],[512,-5424],[7808,847],[7878,6255],[4349,7108],[7163,736],[8764,9677],[6151,-5585],[2709,-2146],[7114,5612],[3220,-3790],[290,-8730],[168,8941],[107,-5529],[9439,-8311],[440,9189],[2493,7304],[117,6653],[8151,-5653],[2908,8852],[1455,-3577],[5941,-3428],[6101,-7908],[7339,5162],[9946,-5546],[7126,9519],[7016,3769],[789,7184],[2710,-2751],[1655,-1499],[5290,-1553],[4042,-2217],[2103,-9488],[788,-3393],[1211,3696],[1811,9019],[6471,-2248],[5591,8924],[6196,2930],[4087,6143],[3736,7565],[5662,-9248],[1334,2803],[4289,-9604],[6404,2296],[8897,-8306],[7096,-708],[5829,9199],[6156,-3383],[2158,-2633],[6665,-
9678],[6386,3137],[8074,1977],[2061,4271],[4908,-7500],[6766,4996],[66,8780],[5749,1400],[7935,38],[1797,-5660],[2334,7046],[2386,9430],[2690,-1784],[4982,-1154],[1185,3492],[6214,-2149],[3814,8952],[7340,8241],[930,-4247],[8864,2190],[8254,5630],[7186,-5328],[762,9287],[6072,8697],[9325,-5779],[9389,1660],[7620,-8224],[7442,-9690],[9992,-7576],[5509,7529],[2269,8075],[5380,-3917],[7027,-7280],[4324,-5691],[8474,3188],[6499,3080],[5170,-9962],[7752,5932],[9325,176],[982,-1349],[4398,371],[6663,-1630],[2147,-9543],[5032,8491],[9234,541],[6021,1503],[8616,7753],[3938,-8004],[6826,8263],[6305,-8348],[7803,9157],[4732,-674],[9195,-1164],[5258,8520],[9012,2592],[3523,-238],[2964,6538],[8132,1463],[3348,-6835],[6307,2582],[58,-7672],[437,5027],[6433,4375],[7023,3259],[8990,-6672],[4911,3146],[2485,-4005],[2472,8032],[4831,-5918],[2905,196],[6675,6428],[9958,9639],[9319,4443],[7454,-7333],[3960,3761],[1601,-9630],[2441,2038],[5397,-1125],[6413,2420],[8486,1756],[2101,3398],[4902,938],[5745,-2626],[5323,-3071],[1456,8228],[7125,-1869],[1008,3435],[4122,6679],[4230,1577],[9346,8190],[1690,947],[4913,4132],[9337,310],[3007,-4249],[9083,-8507],[7507,-2464],[1243,-7591],[4826,-3011],[6135,-9851],[3918,7591],[8377,-2605],[5723,-4262],[830,-3803],[2417,-8587],[7774,8116],[5955,9465],[5415,868],[9949,-5247],[1179,2956],[6856,6614],[801,-9285],[4150,8397],[9476,8976],[1738,-4389],[9126,2008],[3202,3855],[9403,-4723],[9593,6585],[1475,-7989],[7998,-4399],[127,306],[1418,-4458],[1174,1367],[6647,-7647],[4323,3503],[8967,1477],[4218,9469],[6226,3694],[8446,-2036],[9305,3924],[9972,8860],[7779,5727],[4137,-6275],[8664,1964],[5736,-6985],[7566,-7785],[3321,8984],[4109,4495],[352,757],[3201,1027],[4260,-1480],[8856,4831],[7990,-4918],[8525,-7212],[3046,-5817],[6712,-630],[3043,-5509],[1449,-6468],[8216,-3534],[5497,304],[9481,3063],[8871,9154],[8399,2981],[1,8751],[90,-6798],[6131,-9298],[8075,-5013],[5533,6065],[70,-9589],[5205,9468],[946,1917],[5191,-6011],[2760,-7008],[3873,7329],[9458,9370],[7633,5291],[8785,2857],[797,3537],[2190,-9201],[2288,-7720],[353,4771],[9334,-1572],[9759,1220],[845,-3819],[7983,6050],[2001,-1071],[4319,-2808],[9270,7080],[6537,3143],[4409,2347],[8866,8394],[7639,4003],[7603,4788],[7540,-207],[5587,6181],[8425,5941],[952,-5888],[721,-2937],[5332,-8433],[3244,-6685],[3969,5246],[2244,8289],[8790,-8486],[1721,-4673],[1009,-3870],[7675,9875],[876,-8334],[231,-1520],[6454,7771],[4625,2042],[304,9403],[4335,-8743],[3515,-4944],[4672,8847],[2975,7917],[8514,6945],[3163,758],[1586,1953],[8624,-6693],[7281,9633],[5789,1308],[5861,-6983],[2974,-3908],[7849,-572],[215,-7525]])
| 96.677419
| 6,179
| 0.590034
|
import math
class Solution(object):
def _numberOfBoomerangs(self, points):
result = []
distance = [[0] * len(points) for _ in range(len(points))]
for i in range(len(points)):
for j in range(i):
distance[i][j] = (points[i][0]-points[j][0])**2 + (points[i][1]-points[j][1])**2
distance[j][i] = distance[i][j]
for m in range(i):
if distance[i][j] == distance[i-1-m][j]:
result.append([points[i], points[j], points[i-1-m]])
result.append([points[i-1-m], points[j], points[i]])
for m in range(j):
if distance[i][j] == distance[i][j-1-m]:
result.append([points[j], points[i], points[j-1-m]])
result.append([points[j-1-m], points[i], points[j]])
return len(result)
def numberOfBoomerangs(self, points):
        count = 0
        data = {}
        for i in range(len(points)):
            for j in range(i):
                distance = (points[i][0]-points[j][0])**2 + (points[i][1]-points[j][1])**2
                exts = data.get(distance)
                if not exts:
                    data[distance] = [[i,j]]
                else:
                    for ext in exts:
                        if ext[0] == i or ext[0] == j or ext[1] == i or ext[1] == j:
                            count += 2
                    data[distance].append([i,j])
        return count
| true
| true
|
f705538bc3b669ad295f2e7b446dda5111b30b7a
| 4,216
|
py
|
Python
|
recipes/libtiff/all/conanfile.py
|
AlexandreBossard/conan-center-index
|
6f89ce09b20795129b5ef63568a0a458b3d388ec
|
[
"MIT"
] | null | null | null |
recipes/libtiff/all/conanfile.py
|
AlexandreBossard/conan-center-index
|
6f89ce09b20795129b5ef63568a0a458b3d388ec
|
[
"MIT"
] | 1
|
2019-11-26T10:55:31.000Z
|
2019-11-26T10:55:31.000Z
|
recipes/libtiff/all/conanfile.py
|
AlexandreBossard/conan-center-index
|
6f89ce09b20795129b5ef63568a0a458b3d388ec
|
[
"MIT"
] | 1
|
2019-10-31T19:29:14.000Z
|
2019-10-31T19:29:14.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import os
import shutil
class LibtiffConan(ConanFile):
name = "libtiff"
description = "Library for Tag Image File Format (TIFF)"
url = "https://github.com/conan-io/conan-center-index"
author = "Bincrafters <bincrafters@gmail.com>"
license = "MIT"
homepage = "http://www.simplesystems.org/libtiff"
topics = ("tiff", "image", "bigtiff", "tagged-image-file-format")
exports_sources = ["CMakeLists.txt"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {'shared': False, 'fPIC': True}
requires = "zlib/1.2.11"
_source_subfolder = "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
self.options.remove("fPIC")
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename('tiff-' + self.version, self._source_subfolder)
os.rename(os.path.join(self._source_subfolder, "CMakeLists.txt"),
os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"))
shutil.copy("CMakeLists.txt",
os.path.join(self._source_subfolder, "CMakeLists.txt"))
def build(self):
cmake = CMake(self)
cmake.definitions['CMAKE_INSTALL_LIBDIR'] = 'lib'
cmake.definitions['CMAKE_INSTALL_BINDIR'] = 'bin'
cmake.definitions['CMAKE_INSTALL_INCLUDEDIR'] = 'include'
cmake.definitions["lzma"] = False
cmake.definitions["jpeg"] = False
cmake.definitions["jbig"] = False
if self.options.shared and self.settings.compiler == "Visual Studio":
# https://github.com/Microsoft/vcpkg/blob/master/ports/tiff/fix-cxx-shared-libs.patch
tools.replace_in_file(os.path.join(self._source_subfolder, 'libtiff', 'CMakeLists.txt'),
r'set_target_properties(tiffxx PROPERTIES SOVERSION ${SO_COMPATVERSION})',
r'set_target_properties(tiffxx PROPERTIES SOVERSION ${SO_COMPATVERSION} '
r'WINDOWS_EXPORT_ALL_SYMBOLS ON)')
if self.settings.os == "Windows" and self.settings.compiler != "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"find_library(M_LIBRARY m)",
"if (NOT MINGW)\n find_library(M_LIBRARY m)\nendif()")
if self.version == '4.0.8':
            # only one occurrence must be patched. fixed in 4.0.9
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"if (UNIX)",
"if (UNIX OR MINGW)")
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"add_subdirectory(tools)\nadd_subdirectory(test)\nadd_subdirectory(contrib)\nadd_subdirectory(build)\n"
"add_subdirectory(man)\nadd_subdirectory(html)", "")
cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared
cmake.configure(source_folder=self._source_subfolder)
cmake.build()
cmake.install()
def package(self):
self.copy("COPYRIGHT", src=self._source_subfolder, dst="licenses", ignore_case=True, keep_path=False)
tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))
def package_info(self):
self.cpp_info.libs = ["tiff", "tiffxx"]
if self.settings.os == "Windows" and self.settings.build_type == "Debug" and self.settings.compiler == 'Visual Studio':
self.cpp_info.libs = [lib+'d' for lib in self.cpp_info.libs]
if self.options.shared and self.settings.os == "Windows" and self.settings.compiler != 'Visual Studio':
self.cpp_info.libs = [lib+'.dll' for lib in self.cpp_info.libs]
if self.settings.os == "Linux":
self.cpp_info.libs.append("m")
| 47.909091
| 127
| 0.624526
|
from conans import ConanFile, CMake, tools
import os
import shutil
class LibtiffConan(ConanFile):
name = "libtiff"
description = "Library for Tag Image File Format (TIFF)"
url = "https://github.com/conan-io/conan-center-index"
author = "Bincrafters <bincrafters@gmail.com>"
license = "MIT"
homepage = "http://www.simplesystems.org/libtiff"
topics = ("tiff", "image", "bigtiff", "tagged-image-file-format")
exports_sources = ["CMakeLists.txt"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {'shared': False, 'fPIC': True}
requires = "zlib/1.2.11"
_source_subfolder = "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
self.options.remove("fPIC")
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename('tiff-' + self.version, self._source_subfolder)
os.rename(os.path.join(self._source_subfolder, "CMakeLists.txt"),
os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"))
shutil.copy("CMakeLists.txt",
os.path.join(self._source_subfolder, "CMakeLists.txt"))
def build(self):
cmake = CMake(self)
cmake.definitions['CMAKE_INSTALL_LIBDIR'] = 'lib'
cmake.definitions['CMAKE_INSTALL_BINDIR'] = 'bin'
cmake.definitions['CMAKE_INSTALL_INCLUDEDIR'] = 'include'
cmake.definitions["lzma"] = False
cmake.definitions["jpeg"] = False
cmake.definitions["jbig"] = False
if self.options.shared and self.settings.compiler == "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, 'libtiff', 'CMakeLists.txt'),
r'set_target_properties(tiffxx PROPERTIES SOVERSION ${SO_COMPATVERSION})',
r'set_target_properties(tiffxx PROPERTIES SOVERSION ${SO_COMPATVERSION} '
r'WINDOWS_EXPORT_ALL_SYMBOLS ON)')
if self.settings.os == "Windows" and self.settings.compiler != "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"find_library(M_LIBRARY m)",
"if (NOT MINGW)\n find_library(M_LIBRARY m)\nendif()")
if self.version == '4.0.8':
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"if (UNIX)",
"if (UNIX OR MINGW)")
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"add_subdirectory(tools)\nadd_subdirectory(test)\nadd_subdirectory(contrib)\nadd_subdirectory(build)\n"
"add_subdirectory(man)\nadd_subdirectory(html)", "")
cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared
cmake.configure(source_folder=self._source_subfolder)
cmake.build()
cmake.install()
def package(self):
self.copy("COPYRIGHT", src=self._source_subfolder, dst="licenses", ignore_case=True, keep_path=False)
tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))
def package_info(self):
self.cpp_info.libs = ["tiff", "tiffxx"]
if self.settings.os == "Windows" and self.settings.build_type == "Debug" and self.settings.compiler == 'Visual Studio':
self.cpp_info.libs = [lib+'d' for lib in self.cpp_info.libs]
if self.options.shared and self.settings.os == "Windows" and self.settings.compiler != 'Visual Studio':
self.cpp_info.libs = [lib+'.dll' for lib in self.cpp_info.libs]
if self.settings.os == "Linux":
self.cpp_info.libs.append("m")
| true
| true
|
f70553d36267da3daf161458ce9a35ebf850e411
| 1,946
|
py
|
Python
|
examples/functions/python3/mmlogic-simple/mmf.py
|
CodeLingoBot/open-match
|
9c943d5a10b4d110a5dc8194ea3baffb4d4ddae0
|
[
"Apache-2.0"
] | null | null | null |
examples/functions/python3/mmlogic-simple/mmf.py
|
CodeLingoBot/open-match
|
9c943d5a10b4d110a5dc8194ea3baffb4d4ddae0
|
[
"Apache-2.0"
] | null | null | null |
examples/functions/python3/mmlogic-simple/mmf.py
|
CodeLingoBot/open-match
|
9c943d5a10b4d110a5dc8194ea3baffb4d4ddae0
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
#Copyright 2018 Google LLC
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import random
def makeMatches(profile_dict, player_pools):
###########################################################################
# This is the exciting part, and where most of your custom code would go! #
###########################################################################
# The python3 MMF harness passed this function filtered players and their
# filtered attributes in the player_pools dictionary. If we wanted to evaluate
# other player attributes, we could connect to redis directly and query the
# players by their ID to get the entire 'properties' player JSON passed in
# to the frontend API when they entered matchmaking.
# This basic example just pulls players at random from the specified pools in the
# profile. This just serves to show how the dictionaries are accessed and you
    # should write your own rigorous logic here.
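    # Editor's illustrative note (not part of the original example): judging from
    # the accesses below, player_pools is assumed to look roughly like
    #   {'pool_name': {'player_id_1': {...}, 'player_id_2': {...}}, ...}
    # i.e. a dict keyed by pool name, each value being a dict keyed by player id.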
for roster in profile_dict['properties']['rosters']:
for player in roster['players']:
if 'pool' in player:
player['id'] = random.choice(list(player_pools[player['pool']]))
del player_pools[player['pool']][player['id']]
print("Selected player %s from pool %s (strategy: RANDOM)" % (player['id'], player['pool']))
else:
print(player)
return profile_dict
| 48.65
| 108
| 0.649024
|
import random
def makeMatches(profile_dict, player_pools):
| true
| true
|
f7055576cac41ce51631bbc57db651e00990eb63
| 4,019
|
py
|
Python
|
zerver/lib/markdown/help_relative_links.py
|
moazzammoriani/zulip
|
ca506f71dc8b733827a6bf532b107291b4839e55
|
[
"Apache-2.0"
] | null | null | null |
zerver/lib/markdown/help_relative_links.py
|
moazzammoriani/zulip
|
ca506f71dc8b733827a6bf532b107291b4839e55
|
[
"Apache-2.0"
] | null | null | null |
zerver/lib/markdown/help_relative_links.py
|
moazzammoriani/zulip
|
ca506f71dc8b733827a6bf532b107291b4839e55
|
[
"Apache-2.0"
] | null | null | null |
import re
from typing import Any, List, Match, Optional
from markdown import Markdown
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES
# There is a lot of duplicated code between this file and
# help_settings_links.py. So if you're making a change here consider making
# it there as well.
REGEXP = re.compile(r"\{relative\|(?P<link_type>.*?)\|(?P<key>.*?)\}")
gear_info = {
# The pattern is key: [name, link]
# key is from REGEXP: `{relative|gear|key}`
# name is what the item is called in the gear menu: `Select **name**.`
# link is used for relative links: `Select [name](link).`
"manage-streams": ["Manage streams", "/#streams/subscribed"],
"settings": ["Personal Settings", "/#settings/profile"],
"manage-organization": ["Manage organization", "/#organization/organization-profile"],
"integrations": ["Integrations", "/integrations"],
"stats": ["Usage statistics", "/stats"],
"plans": ["Plans and pricing", "/plans"],
"billing": ["Billing", "/billing"],
"invite": ["Invite users", "/#invite"],
}
gear_instructions = """
1. Click on the **gear** (<i class="fa fa-cog"></i>) icon in the upper
right corner of the web or desktop app.
1. Select {item}.
"""
def gear_handle_match(key: str) -> str:
if relative_help_links:
item = f"[{gear_info[key][0]}]({gear_info[key][1]})"
else:
item = f"**{gear_info[key][0]}**"
return gear_instructions.format(item=item)
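# Editor's illustrative note: using the gear_info table above,
# gear_handle_match("settings") fills gear_instructions with
# item = "[Personal Settings](/#settings/profile)" when relative_help_links is
# True, and with item = "**Personal Settings**" otherwise.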
stream_info = {
"all": ["All streams", "/#streams/all"],
"subscribed": ["Subscribed", "/#streams/subscribed"],
}
stream_instructions_no_link = """
1. Click on the **gear** (<i class="fa fa-cog"></i>) icon in the upper
right corner of the web or desktop app.
1. Click **Manage streams**.
"""
def stream_handle_match(key: str) -> str:
if relative_help_links:
return f"1. Go to [{stream_info[key][0]}]({stream_info[key][1]})."
if key == "all":
return stream_instructions_no_link + "\n\n1. Click **All streams** in the upper left."
return stream_instructions_no_link
LINK_TYPE_HANDLERS = {
"gear": gear_handle_match,
"stream": stream_handle_match,
}
class RelativeLinksHelpExtension(Extension):
def extendMarkdown(self, md: Markdown) -> None:
"""Add RelativeLinksHelpExtension to the Markdown instance."""
md.registerExtension(self)
md.preprocessors.register(
RelativeLinks(), "help_relative_links", PREPROCESSOR_PRIORITES["help_relative_links"]
)
relative_help_links: Optional[bool] = None
def set_relative_help_links(value: bool) -> None:
global relative_help_links
relative_help_links = value
class RelativeLinks(Preprocessor):
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = REGEXP.search(line)
if match:
text = [self.handleMatch(match)]
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
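                    # Editor's illustrative note: for a line such as
                    # "See {relative|gear|settings} first", REGEXP.split yields
                    # preceding "See " and following " first", which are kept
                    # around the expanded macro text.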
line_split = REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding, *text, following]
lines = lines[:loc] + text + lines[loc + 1 :]
break
else:
done = True
return lines
def handleMatch(self, match: Match[str]) -> str:
return LINK_TYPE_HANDLERS[match.group("link_type")](match.group("key"))
def makeExtension(*args: Any, **kwargs: Any) -> RelativeLinksHelpExtension:
return RelativeLinksHelpExtension(*args, **kwargs)
| 33.214876
| 97
| 0.629759
|
import re
from typing import Any, List, Match, Optional
from markdown import Markdown
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES
# it there as well.
REGEXP = re.compile(r"\{relative\|(?P<link_type>.*?)\|(?P<key>.*?)\}")
gear_info = {
# The pattern is key: [name, link]
# key is from REGEXP: `{relative|gear|key}`
# name is what the item is called in the gear menu: `Select **name**.`
# link is used for relative links: `Select [name](link).`
"manage-streams": ["Manage streams", "/#streams/subscribed"],
"settings": ["Personal Settings", "/#settings/profile"],
"manage-organization": ["Manage organization", "/#organization/organization-profile"],
"integrations": ["Integrations", "/integrations"],
"stats": ["Usage statistics", "/stats"],
"plans": ["Plans and pricing", "/plans"],
"billing": ["Billing", "/billing"],
"invite": ["Invite users", "/#invite"],
}
gear_instructions = """
1. Click on the **gear** (<i class="fa fa-cog"></i>) icon in the upper
right corner of the web or desktop app.
1. Select {item}.
"""
def gear_handle_match(key: str) -> str:
if relative_help_links:
item = f"[{gear_info[key][0]}]({gear_info[key][1]})"
else:
item = f"**{gear_info[key][0]}**"
return gear_instructions.format(item=item)
stream_info = {
"all": ["All streams", "/#streams/all"],
"subscribed": ["Subscribed", "/#streams/subscribed"],
}
stream_instructions_no_link = """
1. Click on the **gear** (<i class="fa fa-cog"></i>) icon in the upper
right corner of the web or desktop app.
1. Click **Manage streams**.
"""
def stream_handle_match(key: str) -> str:
if relative_help_links:
return f"1. Go to [{stream_info[key][0]}]({stream_info[key][1]})."
if key == "all":
return stream_instructions_no_link + "\n\n1. Click **All streams** in the upper left."
return stream_instructions_no_link
LINK_TYPE_HANDLERS = {
"gear": gear_handle_match,
"stream": stream_handle_match,
}
class RelativeLinksHelpExtension(Extension):
def extendMarkdown(self, md: Markdown) -> None:
md.registerExtension(self)
md.preprocessors.register(
RelativeLinks(), "help_relative_links", PREPROCESSOR_PRIORITES["help_relative_links"]
)
relative_help_links: Optional[bool] = None
def set_relative_help_links(value: bool) -> None:
global relative_help_links
relative_help_links = value
class RelativeLinks(Preprocessor):
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = REGEXP.search(line)
if match:
text = [self.handleMatch(match)]
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding, *text, following]
lines = lines[:loc] + text + lines[loc + 1 :]
break
else:
done = True
return lines
def handleMatch(self, match: Match[str]) -> str:
return LINK_TYPE_HANDLERS[match.group("link_type")](match.group("key"))
def makeExtension(*args: Any, **kwargs: Any) -> RelativeLinksHelpExtension:
return RelativeLinksHelpExtension(*args, **kwargs)
| true
| true
|
f7055739785c4cd04f80cc452e257c475ad0395d
| 1,476
|
py
|
Python
|
prova1/prova1.py
|
samcost/POO
|
5c280407abb7aa9db1c82e52c34fd372465e8fe2
|
[
"MIT"
] | null | null | null |
prova1/prova1.py
|
samcost/POO
|
5c280407abb7aa9db1c82e52c34fd372465e8fe2
|
[
"MIT"
] | null | null | null |
prova1/prova1.py
|
samcost/POO
|
5c280407abb7aa9db1c82e52c34fd372465e8fe2
|
[
"MIT"
] | null | null | null |
import math
class Robo:
def __init__(self,nome):
self.__nome = nome
self.__posicao = [0.0,0.0]
self.__em_op = False
@property
def nome(self):
return self.__nome
@nome.setter
def nome(self, alterar_nome):
self.__nome = alterar_nome
@property
def posicao(self):
return self.__posicao
def __str__(self):
return(f'Robô: {self.__nome}, {self.__em_op} em {self.__posicao}')
def distancia(self,nposicao):
self.nposicao = nposicao
print(math.sqrt(((self.__posicao[0]-self.nposicao[0])**2)+((self.__posicao[1]-self.nposicao[1])**2)))
def move(self,nposicao):
self.__posicao = nposicao
class SistemaMultiRobos():
def __init__(self,quantidade):
self.__robos= []
for i in range(quantidade):
self.__robos.append(Robo(i))
def _acha_robo_ocioso(self):
for i in self.__robos:
            if i._Robo__em_op == False:
return (f'Robô: {i} livre')
def imprime_robos(self):
for i in self.__robos:
print(i)
def despacha(self, coordenadas):
pass
if __name__ == '__main__':
smr = SistemaMultiRobos(3) # sistema com 3 robôs
smr.imprime_robos()
smr.despacha((5.0, 5.0))
smr.imprime_robos()
smr.despacha((-5.0, -5.0))
smr.imprime_robos()
smr.despacha((0.0, -10.0))
smr.imprime_robos()
smr.despacha((15.0, 15.0))
smr.imprime_robos()
| 24.196721
| 109
| 0.592818
|
import math
class Robo:
def __init__(self,nome):
self.__nome = nome
self.__posicao = [0.0,0.0]
self.__em_op = False
@property
def nome(self):
return self.__nome
@nome.setter
def nome(self, alterar_nome):
self.__nome = alterar_nome
@property
def posicao(self):
return self.__posicao
def __str__(self):
return(f'Robô: {self.__nome}, {self.__em_op} em {self.__posicao}')
def distancia(self,nposicao):
self.nposicao = nposicao
print(math.sqrt(((self.__posicao[0]-self.nposicao[0])**2)+((self.__posicao[1]-self.nposicao[1])**2)))
def move(self,nposicao):
self.__posicao = nposicao
class SistemaMultiRobos():
def __init__(self,quantidade):
self.__robos= []
for i in range(quantidade):
self.__robos.append(Robo(i))
def _acha_robo_ocioso(self):
for i in self.__robos:
if i.__em_op== False:
return (f'Robô: {i} livre')
def imprime_robos(self):
for i in self.__robos:
print(i)
def despacha(self, coordenadas):
pass
if __name__ == '__main__':
smr = SistemaMultiRobos(3)
smr.imprime_robos()
smr.despacha((5.0, 5.0))
smr.imprime_robos()
smr.despacha((-5.0, -5.0))
smr.imprime_robos()
smr.despacha((0.0, -10.0))
smr.imprime_robos()
smr.despacha((15.0, 15.0))
smr.imprime_robos()
| true
| true
|
f70557ba49fa8bbd10988a5a6b0b41a89531538e
| 13,690
|
py
|
Python
|
lib/JumpScale/tools/codetools/CodeTools.py
|
Jumpscale/jumpscale_core8
|
f80ac9b1ab99b833ee7adb17700dcf4ef35f3734
|
[
"Apache-2.0"
] | 8
|
2016-04-14T14:04:57.000Z
|
2020-06-09T00:24:34.000Z
|
lib/JumpScale/tools/codetools/CodeTools.py
|
Jumpscale/jumpscale_core8
|
f80ac9b1ab99b833ee7adb17700dcf4ef35f3734
|
[
"Apache-2.0"
] | 418
|
2016-01-25T10:30:00.000Z
|
2021-09-08T12:29:13.000Z
|
lib/JumpScale/tools/codetools/CodeTools.py
|
Jumpscale/jumpscale_core8
|
f80ac9b1ab99b833ee7adb17700dcf4ef35f3734
|
[
"Apache-2.0"
] | 9
|
2016-04-21T07:21:17.000Z
|
2022-01-24T10:35:54.000Z
|
# from JumpScale.baselib.codeexecutor.CodeExecutor import CodeExecutor
import inspect
from JumpScale import j
from ClassBase import ClassBase, JSModelBase, JSRootModelBase
from TemplateEngineWrapper import TemplateEngineWrapper
from JumpScale.data.regex.RegexTools import RegexTools
from TextFileEditor import TextFileEditor
from WordReplacer import WordReplacer
# ujson.dumps does not support some arguments like separators, indent ...etc
def isPrimAttribute(obj, key):
if key[-1] == "s":
funcprop = "new_%s" % key[:-1]
else:
funcprop = "new_%s" % key
isprimtype = not hasattr(obj, funcprop)
return isprimtype, funcprop
class Struct:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class CodeTools:
def __init__(self):
self.__jslocation__ = "j.tools.code"
self._templateengine = None
# self.executor = CodeExecutor()
self._regex = None
self._wordreplacer = None
self._codemanager = None
self._texteditor = None
@property
def codemanager(self):
if self._codemanager is None:
from CodeManager import CodeManager
self._codemanager = CodeManager()
return self._codemanager
@property
def regex(self):
if self._regex is None:
self._regex = RegexTools()
return self._regex
@property
def templateengine(self):
if self._templateengine is None:
self._templateengine = TemplateEngineWrapper()
return self._templateengine
@property
def texteditor(self):
if self._texteditor is None:
self._texteditor = TextFileEditor()
return self._texteditor
@property
def wordreplacer(self):
if self._wordreplacer is None:
self._wordreplacer = WordReplacer()
return self._wordreplacer
def textToTitle(self, text, maxnrchars=60):
"""
try to create a title out of text, ignoring irrelevant words and making lower case and removing
not needed chars
"""
ignore = "for in yes no after up down the"
ignoreitems = ignore.split(" ")
keepchars = "abcdefghijklmnopqrstuvwxyz1234567890 "
out = ""
text = text.lower().strip()
for char in text:
if char in keepchars:
out += char
text = out
        text = text.replace("  ", " ")
        text = text.replace("  ", " ")
out = ""
nr = 0
for item in text.split(" "):
if item not in ignoreitems:
nr += len(item)
if nr < maxnrchars:
out += item + " "
if len(text.split(" ")) > 0:
text = out.strip()
if len(text) > maxnrchars:
text = text[:maxnrchars]
return text
def classInfoPrint(self, classs):
"""
print info like source code of class
"""
filepath, linenr, sourcecode = self.classInfoGet(classs)
print(("line:%s in path:%s" % (linenr, filepath)))
print(sourcecode)
def classInfoGet(self, classs):
"""
returns filepath,linenr,sourcecode
"""
code, nr = inspect.getsourcelines(classs.__class__)
code = "".join(code)
path = inspect.getsourcefile(classs.__class__)
return path, nr, code
def classEditGeany(self, classs):
"""
look for editor (uses geany) and then edit the file
"""
filepath, linenr, sourcecode = self.classInfoGet(classs)
j.sal.process.executeWithoutPipe("geany %s" % filepath)
def classGetBase(self):
return ClassBase
# def classGetAppserver6GreenletSchedule(self):
# return Appserver6GreenletScheduleBase
# def classGetAppserver6Greenlet(self):
# return Appserver6GreenletBase
# def classGetAppserver6GreenletTasklets(self):
# return Appserver6GreenletTaskletsBase
def dict2object(self, obj, data):
if obj is None:
return Struct(**data)
if hasattr(obj, "_dict2obj"):
return obj._dict2obj(data)
if isinstance(data, dict):
for key, value in list(data.items()):
# is for new obj functionname
objpropname = "%s" % key
if isinstance(value, dict) and isinstance(obj.__dict__[objpropname], dict):
# is a real dict (not a dict as representation of an object)
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
raise j.exceptions.RuntimeError("not supported")
else:
for valkey, valval in list(value.items()):
attr = getattr(obj, key)
attr[valkey] = valval
elif isinstance(data[key], list):
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
method = getattr(obj, funcprop)
for valval in value:
newobj = method()
self.dict2object(newobj, valval)
else:
                        for valval in value:
attr = getattr(obj, key)
attr.append(valval)
elif isinstance(value, dict) and not isinstance(obj.__dict__[objpropname], dict):
# is a dict which represents another object
raise j.exceptions.RuntimeError("not supported, only 1 level deep objects")
else:
obj.__dict__[objpropname] = value
return obj
else:
return data
def dict2JSModelobject(self, obj, data):
if isinstance(data, dict):
for key, value in list(data.items()):
# is for new obj functionname
objpropname = "_P_%s" % key if not key.startswith('_P_') else key
if isinstance(value, dict) and isinstance(obj.__dict__[objpropname], dict):
# is a real dict (not a dict as representation of an object)
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
method = getattr(obj, funcprop)
for valkey, valval in list(value.items()):
newobj = method(valkey)
self.dict2JSModelobject(newobj, valval)
else:
for valkey, valval in list(value.items()):
attr = getattr(obj, key)
attr[valkey] = valval
elif isinstance(value, list):
if key == '_meta':
# we do not duplicate meta
continue
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
method = getattr(obj, funcprop)
for valval in value:
newobj = method()
self.dict2JSModelobject(newobj, valval)
else:
for valval in value:
attr = getattr(obj, key)
attr.append(valval)
elif isinstance(value, dict) and not isinstance(obj.__dict__[objpropname], dict):
# is a dict which represents another object
obj.__dict__[objpropname] = self.dict2JSModelobject(obj.__dict__[objpropname], value)
else:
obj.__dict__[objpropname] = value
return obj
else:
return data
# def dict2object2(self,d):
# if isinstance(d, dict):
#n = {}
# for item in d:
# if isinstance(d[item], dict):
#n[item] = dict2obj(d[item])
# elif isinstance(d[item], (list, tuple)):
#n[item] = [dict2obj(elem) for elem in d[item]]
# else:
#n[item] = d[item]
# return type('obj_from_dict', (object,), n)
# else:
# return d
def object2dict4index(self, obj):
"""
convert object to a dict
only properties on first level are considered
and properties of basic types like int,str,float,bool,dict,list
ideal to index the basics of an object
"""
result = {}
def toStr(obj, possibleList=True):
if isinstance(obj, (str, int, float, bool)) or obj is None:
return str(obj)
elif possibleList == True and j.data.types.list.check(obj):
r = ""
for item in obj:
                    rr = toStr(item, possibleList=False)
if rr != "":
r += "%s," % rr
r = r.rstrip(",")
return r
return ""
if isinstance(obj, ClassBase):
for key, value in list(obj.__dict__.items()):
if key[0:3] == "_P_":
key = key[3:]
elif key[0] == "_":
continue
if j.data.types.dict.check(value):
for key2 in list(value.keys()):
r = toStr(value[key2])
if r != "":
result["%s.%s" (key, key2)] = r
else:
r = toStr(value)
if r != "":
result[key] = r
return result
def object2dict(self, obj, dieOnUnknown=False, ignoreKeys=[], ignoreUnderscoreKeys=False):
if j.data.types.dict.check(obj):
return obj
data = {}
def todict(obj, data, ignoreKeys):
if isinstance(obj, dict):
value = {}
for key in list(obj.keys()):
if key in ignoreKeys:
continue
if ignoreUnderscoreKeys and key and key[0] == "_":
continue
value[key] = todict(obj[key], {}, ignoreKeys)
return value
elif isinstance(obj, (tuple, list)):
value = []
for item in obj:
value.append(todict(item, {}, ignoreKeys))
return value
elif isinstance(obj, str):
return obj.encode('utf8')
elif isinstance(obj, (int, str, float, bool)) or obj is None:
return obj
elif isinstance(obj, bytes) or obj is None:
return obj.decode('utf-8', 'ignore')
elif isinstance(obj, ClassBase):
if hasattr(obj, "_obj2dict"):
return obj._obj2dict()
else:
for key, value in list(obj.__dict__.items()):
if key[0:3] == "_P_":
key = key[3:]
if key in ignoreKeys:
continue
elif ignoreUnderscoreKeys and key[0] == "_":
continue
data[key] = todict(value, {}, ignoreKeys)
return data
else:
#from JumpScale.core.Shell import ipshellDebug,ipshell
# print "DEBUG NOW Can only convert object to dict with properties basic types or inherited of ClassBase"
# ipshell()
if dieOnUnknown:
raise j.exceptions.RuntimeError(
"Can only convert object to dict with properties basic types or inherited of ClassBase")
try:
val = str(value)
except:
val = "__UNKNOWN__"
return val
out = todict(obj, data, ignoreKeys)
# print out
return out
def object2yaml(self, obj):
return j.data.serializer.yaml.dumps(self.object2dict(obj))
def object2json(self, obj, pretty=False, skiperrors=False, ignoreKeys=[], ignoreUnderscoreKeys=False):
obj = self.object2dict(obj, dieOnUnknown=not skiperrors, ignoreKeys=ignoreKeys,
ignoreUnderscoreKeys=ignoreUnderscoreKeys)
if pretty:
return j.data.serializer.json.dumps(obj, indent=2, sort_keys=True)
else:
return j.data.serializer.json.dumps(obj)
def pprint(self, obj):
result = self.object2yaml(obj)
result = result.replace("!!python/unicode", "")
print(result)
def deIndent(self, content, level=1):
for i in range(0, level):
content = self._deIndent(content)
return content
def indent(self, content, level=1):
if not content:
return content
if content[-1] == "\n":
content = content[:-1]
lines = list()
for line in content.splitlines():
indent = " " * 4 * level
lines.append("%s%s\n" % (indent, line))
return "".join(lines)
def _deIndent(self, content):
        # remove garbage & fix indentation
content2 = ""
for line in content.split("\n"):
if line.strip() == "":
content2 += "\n"
else:
if line.find(" ") != 0:
raise j.exceptions.RuntimeError("identation error for %s." % content)
content2 += "%s\n" % line[4:]
return content2
| 36.801075
| 121
| 0.514171
|
import inspect
from JumpScale import j
from ClassBase import ClassBase, JSModelBase, JSRootModelBase
from TemplateEngineWrapper import TemplateEngineWrapper
from JumpScale.data.regex.RegexTools import RegexTools
from TextFileEditor import TextFileEditor
from WordReplacer import WordReplacer
def isPrimAttribute(obj, key):
if key[-1] == "s":
funcprop = "new_%s" % key[:-1]
else:
funcprop = "new_%s" % key
isprimtype = not hasattr(obj, funcprop)
return isprimtype, funcprop
class Struct:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class CodeTools:
def __init__(self):
self.__jslocation__ = "j.tools.code"
self._templateengine = None
self._regex = None
self._wordreplacer = None
self._codemanager = None
self._texteditor = None
@property
def codemanager(self):
if self._codemanager is None:
from CodeManager import CodeManager
self._codemanager = CodeManager()
return self._codemanager
@property
def regex(self):
if self._regex is None:
self._regex = RegexTools()
return self._regex
@property
def templateengine(self):
if self._templateengine is None:
self._templateengine = TemplateEngineWrapper()
return self._templateengine
@property
def texteditor(self):
if self._texteditor is None:
self._texteditor = TextFileEditor()
return self._texteditor
@property
def wordreplacer(self):
if self._wordreplacer is None:
self._wordreplacer = WordReplacer()
return self._wordreplacer
def textToTitle(self, text, maxnrchars=60):
ignore = "for in yes no after up down the"
ignoreitems = ignore.split(" ")
keepchars = "abcdefghijklmnopqrstuvwxyz1234567890 "
out = ""
text = text.lower().strip()
for char in text:
if char in keepchars:
out += char
text = out
        text = text.replace("  ", " ")
        text = text.replace("  ", " ")
out = ""
nr = 0
for item in text.split(" "):
if item not in ignoreitems:
nr += len(item)
if nr < maxnrchars:
out += item + " "
if len(text.split(" ")) > 0:
text = out.strip()
if len(text) > maxnrchars:
text = text[:maxnrchars]
return text
def classInfoPrint(self, classs):
filepath, linenr, sourcecode = self.classInfoGet(classs)
print(("line:%s in path:%s" % (linenr, filepath)))
print(sourcecode)
def classInfoGet(self, classs):
code, nr = inspect.getsourcelines(classs.__class__)
code = "".join(code)
path = inspect.getsourcefile(classs.__class__)
return path, nr, code
def classEditGeany(self, classs):
filepath, linenr, sourcecode = self.classInfoGet(classs)
j.sal.process.executeWithoutPipe("geany %s" % filepath)
def classGetBase(self):
return ClassBase
def dict2object(self, obj, data):
if obj is None:
return Struct(**data)
if hasattr(obj, "_dict2obj"):
return obj._dict2obj(data)
if isinstance(data, dict):
for key, value in list(data.items()):
objpropname = "%s" % key
if isinstance(value, dict) and isinstance(obj.__dict__[objpropname], dict):
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
raise j.exceptions.RuntimeError("not supported")
else:
for valkey, valval in list(value.items()):
attr = getattr(obj, key)
attr[valkey] = valval
elif isinstance(data[key], list):
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
method = getattr(obj, funcprop)
for valval in value:
newobj = method()
self.dict2object(newobj, valval)
else:
                        for valval in value:
attr = getattr(obj, key)
attr.append(valval)
elif isinstance(value, dict) and not isinstance(obj.__dict__[objpropname], dict):
raise j.exceptions.RuntimeError("not supported, only 1 level deep objects")
else:
obj.__dict__[objpropname] = value
return obj
else:
return data
def dict2JSModelobject(self, obj, data):
if isinstance(data, dict):
for key, value in list(data.items()):
objpropname = "_P_%s" % key if not key.startswith('_P_') else key
if isinstance(value, dict) and isinstance(obj.__dict__[objpropname], dict):
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
method = getattr(obj, funcprop)
for valkey, valval in list(value.items()):
newobj = method(valkey)
self.dict2JSModelobject(newobj, valval)
else:
for valkey, valval in list(value.items()):
attr = getattr(obj, key)
attr[valkey] = valval
elif isinstance(value, list):
if key == '_meta':
continue
isprimtype, funcprop = isPrimAttribute(obj, key)
if not isprimtype:
method = getattr(obj, funcprop)
for valval in value:
newobj = method()
self.dict2JSModelobject(newobj, valval)
else:
for valval in value:
attr = getattr(obj, key)
attr.append(valval)
elif isinstance(value, dict) and not isinstance(obj.__dict__[objpropname], dict):
obj.__dict__[objpropname] = self.dict2JSModelobject(obj.__dict__[objpropname], value)
else:
obj.__dict__[objpropname] = value
return obj
else:
return data
def object2dict4index(self, obj):
result = {}
def toStr(obj, possibleList=True):
if isinstance(obj, (str, int, float, bool)) or obj is None:
return str(obj)
elif possibleList == True and j.data.types.list.check(obj):
r = ""
for item in obj:
                    rr = toStr(item, possibleList=False)
if rr != "":
r += "%s," % rr
r = r.rstrip(",")
return r
return ""
if isinstance(obj, ClassBase):
for key, value in list(obj.__dict__.items()):
if key[0:3] == "_P_":
key = key[3:]
elif key[0] == "_":
continue
if j.data.types.dict.check(value):
for key2 in list(value.keys()):
r = toStr(value[key2])
if r != "":
result["%s.%s" (key, key2)] = r
else:
r = toStr(value)
if r != "":
result[key] = r
return result
def object2dict(self, obj, dieOnUnknown=False, ignoreKeys=[], ignoreUnderscoreKeys=False):
if j.data.types.dict.check(obj):
return obj
data = {}
def todict(obj, data, ignoreKeys):
if isinstance(obj, dict):
value = {}
for key in list(obj.keys()):
if key in ignoreKeys:
continue
if ignoreUnderscoreKeys and key and key[0] == "_":
continue
value[key] = todict(obj[key], {}, ignoreKeys)
return value
elif isinstance(obj, (tuple, list)):
value = []
for item in obj:
value.append(todict(item, {}, ignoreKeys))
return value
elif isinstance(obj, str):
return obj.encode('utf8')
elif isinstance(obj, (int, str, float, bool)) or obj is None:
return obj
elif isinstance(obj, bytes) or obj is None:
return obj.decode('utf-8', 'ignore')
elif isinstance(obj, ClassBase):
if hasattr(obj, "_obj2dict"):
return obj._obj2dict()
else:
for key, value in list(obj.__dict__.items()):
if key[0:3] == "_P_":
key = key[3:]
if key in ignoreKeys:
continue
elif ignoreUnderscoreKeys and key[0] == "_":
continue
data[key] = todict(value, {}, ignoreKeys)
return data
else:
if dieOnUnknown:
raise j.exceptions.RuntimeError(
"Can only convert object to dict with properties basic types or inherited of ClassBase")
try:
val = str(value)
except:
val = "__UNKNOWN__"
return val
out = todict(obj, data, ignoreKeys)
return out
def object2yaml(self, obj):
return j.data.serializer.yaml.dumps(self.object2dict(obj))
def object2json(self, obj, pretty=False, skiperrors=False, ignoreKeys=[], ignoreUnderscoreKeys=False):
obj = self.object2dict(obj, dieOnUnknown=not skiperrors, ignoreKeys=ignoreKeys,
ignoreUnderscoreKeys=ignoreUnderscoreKeys)
if pretty:
return j.data.serializer.json.dumps(obj, indent=2, sort_keys=True)
else:
return j.data.serializer.json.dumps(obj)
def pprint(self, obj):
result = self.object2yaml(obj)
result = result.replace("!!python/unicode", "")
print(result)
def deIndent(self, content, level=1):
for i in range(0, level):
content = self._deIndent(content)
return content
def indent(self, content, level=1):
if not content:
return content
if content[-1] == "\n":
content = content[:-1]
lines = list()
for line in content.splitlines():
indent = " " * 4 * level
lines.append("%s%s\n" % (indent, line))
return "".join(lines)
def _deIndent(self, content):
content2 = ""
for line in content.split("\n"):
if line.strip() == "":
content2 += "\n"
else:
if line.find(" ") != 0:
raise j.exceptions.RuntimeError("identation error for %s." % content)
content2 += "%s\n" % line[4:]
return content2
| true
| true
|
f70557d356725dad83576aa76ea58678d0c0e049
| 1,921
|
py
|
Python
|
tests/test_trim.py
|
jwilk/mwic
|
f3abc4bb35292e42603285f08a55336d04795ce7
|
[
"MIT"
] | 38
|
2016-06-02T19:04:39.000Z
|
2021-07-09T18:48:40.000Z
|
tests/test_trim.py
|
jwilk/mwic
|
f3abc4bb35292e42603285f08a55336d04795ce7
|
[
"MIT"
] | 9
|
2016-05-26T13:31:11.000Z
|
2022-02-07T20:40:11.000Z
|
tests/test_trim.py
|
jwilk/mwic
|
f3abc4bb35292e42603285f08a55336d04795ce7
|
[
"MIT"
] | 7
|
2016-06-07T09:53:55.000Z
|
2019-09-19T10:59:05.000Z
|
# Copyright © 2014-2016 Jakub Wilk <jwilk@jwilk.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import (
assert_equal,
assert_greater_equal,
)
import lib.text as M
def test_ltrim():
def t(s, n, expected):
result = M.ltrim(s, n)
assert_greater_equal(
max(1, n),
len(result)
)
assert_equal(result, expected)
truncations = [
'…',
'…',
'…s',
'…gs',
'eggs',
'eggs',
]
for n, s in enumerate(truncations):
t(truncations[-1], n, s)
def test_rtrim():
def t(s, n, expected):
result = M.rtrim(s, n)
assert_equal(result, expected)
truncations = [
'…',
'…',
'e…',
'eg…',
'eggs',
'eggs',
]
for n, s in enumerate(truncations):
t(truncations[-1], n, s)
# vim:ts=4 sts=4 sw=4 et
| 30.492063
| 79
| 0.647059
|
from nose.tools import (
assert_equal,
assert_greater_equal,
)
import lib.text as M
def test_ltrim():
def t(s, n, expected):
result = M.ltrim(s, n)
assert_greater_equal(
max(1, n),
len(result)
)
assert_equal(result, expected)
truncations = [
'…',
'…',
'…s',
'…gs',
'eggs',
'eggs',
]
for n, s in enumerate(truncations):
t(truncations[-1], n, s)
def test_rtrim():
def t(s, n, expected):
result = M.rtrim(s, n)
assert_equal(result, expected)
truncations = [
'…',
'…',
'e…',
'eg…',
'eggs',
'eggs',
]
for n, s in enumerate(truncations):
t(truncations[-1], n, s)
| true
| true
|
f70559b1280ee06eb3bb7d227ddfef9d0d20fdfa
| 876
|
py
|
Python
|
google/colab/_import_hooks/__init__.py
|
Gauravds435/colabtools
|
6b9972ff63689b30f1cc7dda06b0159d0e979c08
|
[
"Apache-2.0"
] | 2
|
2020-10-15T14:59:34.000Z
|
2021-02-19T15:25:01.000Z
|
google/colab/_import_hooks/__init__.py
|
Gauravds435/colabtools
|
6b9972ff63689b30f1cc7dda06b0159d0e979c08
|
[
"Apache-2.0"
] | null | null | null |
google/colab/_import_hooks/__init__.py
|
Gauravds435/colabtools
|
6b9972ff63689b30f1cc7dda06b0159d0e979c08
|
[
"Apache-2.0"
] | 2
|
2020-10-12T05:45:32.000Z
|
2020-10-12T11:09:59.000Z
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language govestylerning permissions and
# limitations under the License.
"""Colab import customizations to the IPython runtime."""
from google.colab._import_hooks import _altair
from google.colab._import_hooks import _cv2
def _register_hooks():
_altair._register_hook() # pylint:disable=protected-access
_cv2._register_hook() # pylint:disable=protected-access
| 38.086957
| 74
| 0.775114
|
from google.colab._import_hooks import _altair
from google.colab._import_hooks import _cv2
def _register_hooks():
_altair._register_hook()
_cv2._register_hook()
| true
| true
|
f7055a69708df947b5140370454fbcb299ec5cce
| 6,460
|
py
|
Python
|
spec/parsers_spec.py
|
gisce/esios
|
f90d89059847d1a7034c3cc7a5898a8409ce627f
|
[
"MIT"
] | 7
|
2016-02-25T11:20:18.000Z
|
2022-03-07T20:01:36.000Z
|
spec/parsers_spec.py
|
gisce/esios
|
f90d89059847d1a7034c3cc7a5898a8409ce627f
|
[
"MIT"
] | 8
|
2017-02-28T14:50:52.000Z
|
2022-01-27T16:58:05.000Z
|
spec/parsers_spec.py
|
gisce/esios
|
f90d89059847d1a7034c3cc7a5898a8409ce627f
|
[
"MIT"
] | 6
|
2016-03-18T14:43:14.000Z
|
2022-01-12T13:04:09.000Z
|
# -*- coding: utf-8 -*-
from expects.testing import failure
from expects import *
from datetime import datetime
from dateutil.relativedelta import relativedelta
import json
import os
from esios import Esios
from esios.parsers import P48CierreParser
from pytz import timezone
LOCAL_TZ = timezone('Europe/Madrid')
UTC_TZ = timezone('UTC')
def validate_json(result):
expect(result).to(be_a(str))
data = json.loads(result)
expect(data).to(be_a(list))
expect(len(data)).to(be_above(22))
expect(data[0]).to(be_a(dict))
expect(data[0]).to(
have_keys('hour', 'up', 'value', 'cierre', 'utc_timestamp', 'local_timestamp')
)
for register in data:
# validate timestamps
local_datetime, local_offset = register['local_timestamp'].split('+')
is_dst = local_offset != '01:00'
local_ts = LOCAL_TZ.localize(datetime.strptime(local_datetime, '%Y-%m-%d %H:%M:%S'), is_dst=is_dst)
utc_ts = UTC_TZ.localize(datetime.strptime(register['utc_timestamp'], '%Y-%m-%d %H:%M:%S+00:00'))
expect(register['local_timestamp']).to_not(equal(register['utc_timestamp']))
expected_local_ts = LOCAL_TZ.normalize(utc_ts.astimezone(LOCAL_TZ))
expect(local_ts).to(equal(expected_local_ts))
def validate_data(result, start, end, cierre=None):
data = json.loads(result)
hours = int(((end - start).total_seconds() / 3600) + 1)
expect(len(data)).to(be(hours))
max_date = max([d['local_timestamp'] for d in data])
min_date = min([d['local_timestamp'] for d in data])
expect(min_date).to(equal(str(start)))
expect(max_date).to(equal(str(end)))
if cierre is not None:
for c in data:
expect(c['cierre']).to(equal(cierre))
with description('Esios Parsers'):
with before.all:
ESIOS_TOKEN = os.getenv('ESIOS_TOKEN')
self.token = ESIOS_TOKEN
self.today = datetime.today()
self.e = Esios(self.token)
with context('p48CierreParser: p48cierre files parser'):
with context('Can download data from esios'):
with it('Creates an instance'):
parser = P48CierreParser(self.e)
expect(parser).to(be_a(P48CierreParser))
with it('may be parsed as json'):
parser = P48CierreParser(self.e)
today = datetime.now()
start = LOCAL_TZ.localize(
today.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta(days=1)
)
end = LOCAL_TZ.localize(
today.replace(hour=23, minute=59, second=59, microsecond=0)
)
result = parser.get_data_json('SOMEC01', start, end)
validate_json(result)
validate_data(result, start + relativedelta(hours=1), end + relativedelta(seconds=1))
with context('parses local files'):
with it('gets a zipfile and may be parsed as json'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48cierre.zip')
validate_json(result)
# contains full 2020/09/15 and full 2020/09/17
data = json.loads(result)
expect(len(data)).to(equal(48))
local_timestamps = [r['local_timestamp'] for r in data]
ts_template = '2020-09-{:02} {:02}:00:00+02:00'
# 2020/09/15
for hour in range(1, 24):
expect(local_timestamps).to(contain(ts_template.format(15, hour)))
expect(local_timestamps).to(contain(ts_template.format(16, 0)))
# 2020/09/17
for hour in range(1, 24):
expect(local_timestamps).to(contain(ts_template.format(17, hour)))
expect(local_timestamps).to(contain(ts_template.format(18, 0)))
# cierre
for c in data:
if '2020-09-15' in c['local_timestamp']:
expect(c['cierre']).to(be_true)
elif '2020-09-16' in c['local_timestamp']:
expect(c['cierre']).to(be_true)
elif '2020-09-17' in c['local_timestamp']:
expect(c['cierre']).to(be_false)
elif '2020-09-18' in c['local_timestamp']:
expect(c['cierre']).to(be_false)
with it('gets a p48cierre xml file and may be parsed as json'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48cierre_20200915.xml')
validate_json(result)
validate_data(
result, LOCAL_TZ.localize(datetime(2020, 9, 15, 1, 0)), LOCAL_TZ.localize(datetime(2020, 9, 16, 0, 0), True)
)
with it('gets a p48 xml file and may be parsed as json'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48_2020091618.xml')
validate_json(result)
validate_data(
result, LOCAL_TZ.localize(datetime(2020, 9, 17, 1, 0)), LOCAL_TZ.localize(datetime(2020, 9, 18, 0, 0), False)
)
with it('gets 25 registers for a p48cierre xml file from October saving time day'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48cierre_20191027.xml')
validate_json(result)
validate_data(
result, LOCAL_TZ.localize(datetime(2019, 10, 27, 1, 0)), LOCAL_TZ.localize(datetime(2019, 10, 28, 0, 0)), True
)
data = json.loads(result)
expect(len(data)).to(equal(25))
with it('gets 23 registers for a p48cierre xml file from March saving time day'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48cierre_20200329.xml')
validate_json(result)
validate_data(
result, LOCAL_TZ.localize(datetime(2020, 3, 29, 1, 0)), LOCAL_TZ.localize(datetime(2020, 3, 30, 0, 0)), True
)
data = json.loads(result)
expect(len(data)).to(equal(23))
| 39.631902
| 130
| 0.576161
|
from expects.testing import failure
from expects import *
from datetime import datetime
from dateutil.relativedelta import relativedelta
import json
import os
from esios import Esios
from esios.parsers import P48CierreParser
from pytz import timezone
LOCAL_TZ = timezone('Europe/Madrid')
UTC_TZ = timezone('UTC')
def validate_json(result):
expect(result).to(be_a(str))
data = json.loads(result)
expect(data).to(be_a(list))
expect(len(data)).to(be_above(22))
expect(data[0]).to(be_a(dict))
expect(data[0]).to(
have_keys('hour', 'up', 'value', 'cierre', 'utc_timestamp', 'local_timestamp')
)
for register in data:
local_datetime, local_offset = register['local_timestamp'].split('+')
is_dst = local_offset != '01:00'
local_ts = LOCAL_TZ.localize(datetime.strptime(local_datetime, '%Y-%m-%d %H:%M:%S'), is_dst=is_dst)
utc_ts = UTC_TZ.localize(datetime.strptime(register['utc_timestamp'], '%Y-%m-%d %H:%M:%S+00:00'))
expect(register['local_timestamp']).to_not(equal(register['utc_timestamp']))
expected_local_ts = LOCAL_TZ.normalize(utc_ts.astimezone(LOCAL_TZ))
expect(local_ts).to(equal(expected_local_ts))
def validate_data(result, start, end, cierre=None):
data = json.loads(result)
hours = int(((end - start).total_seconds() / 3600) + 1)
expect(len(data)).to(be(hours))
max_date = max([d['local_timestamp'] for d in data])
min_date = min([d['local_timestamp'] for d in data])
expect(min_date).to(equal(str(start)))
expect(max_date).to(equal(str(end)))
if cierre is not None:
for c in data:
expect(c['cierre']).to(equal(cierre))
with description('Esios Parsers'):
with before.all:
ESIOS_TOKEN = os.getenv('ESIOS_TOKEN')
self.token = ESIOS_TOKEN
self.today = datetime.today()
self.e = Esios(self.token)
with context('p48CierreParser: p48cierre files parser'):
with context('Can download data from esios'):
with it('Creates an instance'):
parser = P48CierreParser(self.e)
expect(parser).to(be_a(P48CierreParser))
with it('may be parsed as json'):
parser = P48CierreParser(self.e)
today = datetime.now()
start = LOCAL_TZ.localize(
today.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta(days=1)
)
end = LOCAL_TZ.localize(
today.replace(hour=23, minute=59, second=59, microsecond=0)
)
result = parser.get_data_json('SOMEC01', start, end)
validate_json(result)
validate_data(result, start + relativedelta(hours=1), end + relativedelta(seconds=1))
with context('parses local files'):
with it('gets a zipfile and may be parsed as json'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48cierre.zip')
validate_json(result)
data = json.loads(result)
expect(len(data)).to(equal(48))
local_timestamps = [r['local_timestamp'] for r in data]
ts_template = '2020-09-{:02} {:02}:00:00+02:00'
for hour in range(1, 24):
expect(local_timestamps).to(contain(ts_template.format(15, hour)))
expect(local_timestamps).to(contain(ts_template.format(16, 0)))
for hour in range(1, 24):
expect(local_timestamps).to(contain(ts_template.format(17, hour)))
expect(local_timestamps).to(contain(ts_template.format(18, 0)))
for c in data:
if '2020-09-15' in c['local_timestamp']:
expect(c['cierre']).to(be_true)
elif '2020-09-16' in c['local_timestamp']:
expect(c['cierre']).to(be_true)
elif '2020-09-17' in c['local_timestamp']:
expect(c['cierre']).to(be_false)
elif '2020-09-18' in c['local_timestamp']:
expect(c['cierre']).to(be_false)
with it('gets a p48cierre xml file and may be parsed as json'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48cierre_20200915.xml')
validate_json(result)
validate_data(
result, LOCAL_TZ.localize(datetime(2020, 9, 15, 1, 0)), LOCAL_TZ.localize(datetime(2020, 9, 16, 0, 0), True)
)
with it('gets a p48 xml file and may be parsed as json'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48_2020091618.xml')
validate_json(result)
validate_data(
result, LOCAL_TZ.localize(datetime(2020, 9, 17, 1, 0)), LOCAL_TZ.localize(datetime(2020, 9, 18, 0, 0), False)
)
with it('gets 25 registers for a p48cierre xml file from October saving time day'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48cierre_20191027.xml')
validate_json(result)
validate_data(
result, LOCAL_TZ.localize(datetime(2019, 10, 27, 1, 0)), LOCAL_TZ.localize(datetime(2019, 10, 28, 0, 0)), True
)
data = json.loads(result)
expect(len(data)).to(equal(25))
with it('gets 23 registers for a p48cierre xml file from March saving time day'):
parser = P48CierreParser(self.e)
result = parser.get_data_json_from_file('SOMEC01', 'spec/data/p48cierre_20200329.xml')
validate_json(result)
validate_data(
result, LOCAL_TZ.localize(datetime(2020, 3, 29, 1, 0)), LOCAL_TZ.localize(datetime(2020, 3, 30, 0, 0)), True
)
data = json.loads(result)
expect(len(data)).to(equal(23))
| true
| true
|
f7055c47f2348aba66bd852b9d2acd9572d37cb7
| 3,398
|
py
|
Python
|
tareas/2/CorreaAlfredo/deGatosYRatones.py
|
FrancisBL5/sistop-2022-2
|
5c9c7363bbf2143b44b5886a9e6d51614218ffd8
|
[
"CC-BY-4.0"
] | 9
|
2022-02-03T00:16:01.000Z
|
2022-02-25T06:30:46.000Z
|
tareas/2/CorreaAlfredo/deGatosYRatones.py
|
FrancisBL5/sistop-2022-2
|
5c9c7363bbf2143b44b5886a9e6d51614218ffd8
|
[
"CC-BY-4.0"
] | 27
|
2022-02-08T18:48:49.000Z
|
2022-03-16T19:44:05.000Z
|
tareas/2/CorreaAlfredo/deGatosYRatones.py
|
FrancisBL5/sistop-2022-2
|
5c9c7363bbf2143b44b5886a9e6d51614218ffd8
|
[
"CC-BY-4.0"
] | 31
|
2022-02-03T00:17:14.000Z
|
2022-03-31T15:13:40.000Z
|
# -*- coding: utf-8 -*-
"""
Correa González Alfredo
De gatos y ratones
- Tengo k gatos (e I ratones) en casa.
- Les sirvo comida a mis gatos en m platos.
- Gatos y ratones han llegado a un acuerdo para repartirse el
tiempo y comida pero tienen que convencerme que están haciendo
su trabajo
- Los gatos pueden comer en sus m platos de comida.
- Los ratones pueden comer en esos platos siempre y cuando
no sean vistos.
- Si un gato ve a un ratón comiendo, se lo debe comer.
- Los platos están puestos uno junto al otro.
- Solo un animal puede comer en un plato a la vez.
- Si un gato está comiendo y ve a un ratón que comienza a comer
de oitro plato, el gato se lo ve y se lo come.
- Por acuerdo de caballeros, los gatos no pueden acercarse
a los platos mientras haya ratones comiendo.
"""
from threading import Semaphore, Thread, Event
import threading
import time
import random
hambreDeGato = 100
hambreDeRaton = 2
numeroDeGatos = 2
numeroDeRatones = 10
platos = []
p = 5
gatosComiendo = 0
ratonesComiendo = 0
mutex_hambreGato = threading.Semaphore(1)
mutex_hambreRaton = threading.Semaphore(1)
entrar_a_comer = Semaphore(1)
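# Editor's descriptive note: platos holds one Semaphore per dish (filled in
# below), so only one animal eats from a dish at a time; gatosComiendo and
# ratonesComiendo count the animals currently eating; mutex_hambreGato and
# mutex_hambreRaton serialize the cats' and the mice's critical sections; and
# entrar_a_comer is acquired and released immediately before each attempt.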
def gato(id,m):
global gatosComiendo, ratonesComiendo, platos, numeroDeRatones
while numeroDeRatones != 0:
time.sleep(random.random() / hambreDeGato)
entrar_a_comer.acquire()
entrar_a_comer.release()
mutex_hambreGato.acquire()
if ratonesComiendo > 0:
print("Gato {} no se acerca a los platos por su orgullo de caballero".format(id))
mutex_hambreGato.release()
else:
platos[id%m].acquire()
print("El gato {} comienza a comer del plato {}".format(id, id%m))
gatosComiendo = gatosComiendo + 1
print("El gato {} terminó de comer".format(id))
gatosComiendo = gatosComiendo - 1
platos[id%m].release()
mutex_hambreGato.release()
def raton(id,m):
global gatosComiendo, ratonesComiendo, platos, numeroDeRatones
while numeroDeRatones != 0:
time.sleep(random.random() / hambreDeRaton)
entrar_a_comer.acquire()
entrar_a_comer.release()
mutex_hambreRaton.acquire()
if gatosComiendo > 0:
print("Se comieron al ratón {}".format(id))
ratonesComiendo = ratonesComiendo - 1
numeroDeRatones = numeroDeRatones - 1
if(numeroDeRatones == 0):
print("¡¡¡¡¡SE MURIERON TODOS LOS RATONES :(!!!!!")
time.sleep(10000)
mutex_hambreRaton.release()
else:
platos[id%m].acquire()
print("El ratón {} comienza a comer en el plato {}".format(id, id%m))
ratonesComiendo = ratonesComiendo + 1
print("El ratón {} terminó de comer".format(id))
ratonesComiendo = ratonesComiendo - 1
platos[id%m].release()
mutex_hambreRaton.release()
for i in range(p):
platos.append(Semaphore(1))
for i in range(numeroDeGatos):
Thread(target = gato, args = [i,p]).start()
for i in range(numeroDeRatones):
Thread(target = raton, args = [i,p]).start()
| 28.554622
| 93
| 0.604768
|
from threading import Semaphore, Thread, Event
import threading
import time
import random
hambreDeGato = 100
hambreDeRaton = 2
numeroDeGatos = 2
numeroDeRatones = 10
platos = []
p = 5
gatosComiendo = 0
ratonesComiendo = 0
mutex_hambreGato = threading.Semaphore(1)
mutex_hambreRaton = threading.Semaphore(1)
entrar_a_comer = Semaphore(1)
def gato(id,m):
global gatosComiendo, ratonesComiendo, platos, numeroDeRatones
while numeroDeRatones != 0:
time.sleep(random.random() / hambreDeGato)
entrar_a_comer.acquire()
entrar_a_comer.release()
mutex_hambreGato.acquire()
if ratonesComiendo > 0:
print("Gato {} no se acerca a los platos por su orgullo de caballero".format(id))
mutex_hambreGato.release()
else:
platos[id%m].acquire()
print("El gato {} comienza a comer del plato {}".format(id, id%m))
gatosComiendo = gatosComiendo + 1
print("El gato {} terminó de comer".format(id))
gatosComiendo = gatosComiendo - 1
platos[id%m].release()
mutex_hambreGato.release()
def raton(id,m):
global gatosComiendo, ratonesComiendo, platos, numeroDeRatones
while numeroDeRatones != 0:
time.sleep(random.random() / hambreDeRaton)
entrar_a_comer.acquire()
entrar_a_comer.release()
mutex_hambreRaton.acquire()
if gatosComiendo > 0:
print("Se comieron al ratón {}".format(id))
ratonesComiendo = ratonesComiendo - 1
numeroDeRatones = numeroDeRatones - 1
if(numeroDeRatones == 0):
print("¡¡¡¡¡SE MURIERON TODOS LOS RATONES :(!!!!!")
time.sleep(10000)
mutex_hambreRaton.release()
else:
platos[id%m].acquire()
print("El ratón {} comienza a comer en el plato {}".format(id, id%m))
ratonesComiendo = ratonesComiendo + 1
print("El ratón {} terminó de comer".format(id))
ratonesComiendo = ratonesComiendo - 1
platos[id%m].release()
mutex_hambreRaton.release()
for i in range(p):
platos.append(Semaphore(1))
for i in range(numeroDeGatos):
Thread(target = gato, args = [i,p]).start()
for i in range(numeroDeRatones):
Thread(target = raton, args = [i,p]).start()
| true
| true
|
f7055c54768f4c8845f7bfc40c698c57b626f1b2
| 3,204
|
py
|
Python
|
Scripts/convert_png_tiles.py
|
TheOpponent/st3-translation-notes
|
c78d7c2347611c07677ec5e293bbd6351800f438
|
[
"Unlicense"
] | null | null | null |
Scripts/convert_png_tiles.py
|
TheOpponent/st3-translation-notes
|
c78d7c2347611c07677ec5e293bbd6351800f438
|
[
"Unlicense"
] | 3
|
2022-03-27T17:05:09.000Z
|
2022-03-31T13:45:59.000Z
|
Scripts/convert_png_tiles.py
|
TheOpponent/st3-translation-notes
|
c78d7c2347611c07677ec5e293bbd6351800f438
|
[
"Unlicense"
] | null | null | null |
# This script reads a PNG file containing a single row of 26 x 26 tiles and outputs binary data.
# NumPy and Pillow are required as dependencies.
#
# Specify an input PNG file and an optional output file as arguments.
# If an output file is not given, the binary data will be written in the console.
#
# The original graphic format is 4 bits per pixel, with each byte representing two pixels stacked vertically.
# The left nybble represents the lower pixel and the right nybble represents the upper pixel.
# 13 rows of these bytes create a 26 x 26 tile.
#
# To create replacement tiles, create a non-transparent image with the following 16-color palette:
# 000000 101010 202020 303030 404040 505050 606060 707070 808080 909090 A0A0A0 B0B0B0 C0C0C0 D0D0D0 E0E0E0 F0F0F0
#
# Although the resulting image will be grayscale, this image should be saved as 8-bit RGB.
# Image editors will frequently override indexed palettes when converting to grayscale,
# so creating RGB images is recommended to guarantee the palette will not be changed.
# The first channel (red) of this file will be read and used as pixel data.
#
# Overwrite SKFONT.CG with the output starting at the tile offset to replace.
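# Editor's illustrative note (not part of the original tool): for one vertical
# pixel pair, if the upper pixel's palette value is 0xA0 (4-bit value 0xA) and
# the lower pixel's is 0x30 (4-bit value 0x3), the packed byte is
# (0x3 << 4) | 0xA == 0x3A, i.e. left nybble = lower pixel, right nybble = upper pixel.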
import struct
import sys
import numpy as np
from PIL import Image
def main():
if len(sys.argv) < 2:
print("Specify input PNG file.")
return
with Image.open(sys.argv[1]) as input_file:
output = b''
# Read image and split into equal number of 26 x 26 arrays.
image = list(input_file.getdata(0))
image_size = input_file.size
image_2d = np.empty((image_size[1],image_size[0]),dtype="uint8")
# rows = image[2]
try:
            for i in range(0, image_size[1]):
image_2d[i] = image[i * image_size[0]:(i + 1) * image_size[0]]
# Split into individual tiles.
tiles = np.hsplit(image_2d,image_size[0] / 26)
for i in tiles:
# Bitwise shift 4 to the right to obtain 0-F value for each pixel.
tile = np.right_shift(i,4)
# Divide each tile into 26 x 2 arrays.
tile_row_pairs = np.vsplit(tile,13)
for row_pair in tile_row_pairs:
for column in range(0,26):
# Upper pixel is right nybble; lower pixel is left nybble.
upper_pixel = row_pair[0][column]
lower_pixel = row_pair[1][column] << 4
pixels = upper_pixel + lower_pixel
output += struct.pack("=B",pixels)
except ValueError:
print("Input PNG file must be 8-bit, no transparency, and have a height of 26 pixels and width a multiple of 26 pixels.")
return
if len(sys.argv) >= 3:
with open(sys.argv[2],"wb") as output_file:
output_file.write(output)
print(f"Paste the contents of {sys.argv[2]} into SKFONT.CG starting at the tile(s) to replace.")
else:
print(output.hex())
print("\nPaste the above hex into SKFONT.CG starting at the tile(s) to replace.")
if __name__ == "__main__":
main()
| 40.556962
| 133
| 0.634207
|
import struct
import sys
import numpy as np
from PIL import Image
def main():
if len(sys.argv) < 2:
print("Specify input PNG file.")
return
with Image.open(sys.argv[1]) as input_file:
output = b''
image = list(input_file.getdata(0))
image_size = input_file.size
image_2d = np.empty((image_size[1],image_size[0]),dtype="uint8")
try:
            for i in range(0, image_size[1]):
image_2d[i] = image[i * image_size[0]:(i + 1) * image_size[0]]
tiles = np.hsplit(image_2d,image_size[0] / 26)
for i in tiles:
tile = np.right_shift(i,4)
tile_row_pairs = np.vsplit(tile,13)
for row_pair in tile_row_pairs:
for column in range(0,26):
upper_pixel = row_pair[0][column]
lower_pixel = row_pair[1][column] << 4
pixels = upper_pixel + lower_pixel
output += struct.pack("=B",pixels)
except ValueError:
print("Input PNG file must be 8-bit, no transparency, and have a height of 26 pixels and width a multiple of 26 pixels.")
return
if len(sys.argv) >= 3:
with open(sys.argv[2],"wb") as output_file:
output_file.write(output)
print(f"Paste the contents of {sys.argv[2]} into SKFONT.CG starting at the tile(s) to replace.")
else:
print(output.hex())
print("\nPaste the above hex into SKFONT.CG starting at the tile(s) to replace.")
if __name__ == "__main__":
main()
| true
| true
|
f7055f2933d1e39838b93296dc3c5e19f07f44fb
| 9,876
|
py
|
Python
|
tools/config.py
|
0x53A/emscripten
|
dfb6fdadfd68b1478cda4654f55412552f7d8d09
|
[
"MIT"
] | 1
|
2021-11-27T07:11:09.000Z
|
2021-11-27T07:11:09.000Z
|
tools/config.py
|
thomasballinger/emscripten
|
c5928fec6e09f84872e9297806b44d828f1f0a05
|
[
"MIT"
] | 1
|
2021-12-19T02:20:43.000Z
|
2021-12-19T02:20:43.000Z
|
tools/config.py
|
thomasballinger/emscripten
|
c5928fec6e09f84872e9297806b44d828f1f0a05
|
[
"MIT"
] | null | null | null |
# Copyright 2020 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import os
import sys
import logging
from .utils import path_from_root, exit_with_error, __rootpath__, which
logger = logging.getLogger('shared')
# The following class can be overridden by the config file and/or
# environment variables. Specifically any variable whose name
# is in ALL_UPPER_CASE is considered a valid config file key.
# See parse_config_file below.
EMSCRIPTEN_ROOT = __rootpath__
NODE_JS = None
BINARYEN_ROOT = None
SPIDERMONKEY_ENGINE = None
V8_ENGINE = None
LLVM_ROOT = None
LLVM_ADD_VERSION = None
CLANG_ADD_VERSION = None
CLOSURE_COMPILER = None
JAVA = None
JS_ENGINE = None
JS_ENGINES = None
WASMER = None
WASMTIME = None
WASM_ENGINES = []
FROZEN_CACHE = None
CACHE = None
PORTS = None
COMPILER_WRAPPER = None
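# Editor's illustrative note: a config file is plain Python assigning the keys
# above; a minimal example (paths are placeholders, not defaults) would be:
#   LLVM_ROOT = '/path/to/llvm/bin'
#   BINARYEN_ROOT = '/path/to/binaryen'
#   NODE_JS = '/path/to/node'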
def listify(x):
if type(x) is not list:
return [x]
return x
def fix_js_engine(old, new):
if old is None:
return
global JS_ENGINES
JS_ENGINES = [new if x == old else x for x in JS_ENGINES]
return new
def root_is_writable():
return os.access(__rootpath__, os.W_OK)
def normalize_config_settings():
global CACHE, PORTS, JAVA, LLVM_ADD_VERSION, CLANG_ADD_VERSION
global NODE_JS, V8_ENGINE, JS_ENGINE, JS_ENGINES, SPIDERMONKEY_ENGINE, WASM_ENGINES
# EM_CONFIG stuff
if not JS_ENGINES:
JS_ENGINES = [NODE_JS]
if not JS_ENGINE:
JS_ENGINE = JS_ENGINES[0]
# Engine tweaks
if SPIDERMONKEY_ENGINE:
new_spidermonkey = SPIDERMONKEY_ENGINE
if '-w' not in str(new_spidermonkey):
new_spidermonkey += ['-w']
SPIDERMONKEY_ENGINE = fix_js_engine(SPIDERMONKEY_ENGINE, new_spidermonkey)
NODE_JS = fix_js_engine(NODE_JS, listify(NODE_JS))
V8_ENGINE = fix_js_engine(V8_ENGINE, listify(V8_ENGINE))
JS_ENGINE = fix_js_engine(JS_ENGINE, listify(JS_ENGINE))
JS_ENGINES = [listify(engine) for engine in JS_ENGINES]
WASM_ENGINES = [listify(engine) for engine in WASM_ENGINES]
if not CACHE:
if root_is_writable():
CACHE = path_from_root('cache')
else:
# Use the legacy method of putting the cache in the user's home directory
# if the emscripten root is not writable.
# This is useful mostly for read-only installation and perhaps could
# be removed in the future since such installations should probably be
# setting a specific cache location.
logger.debug('Using home-directory for emscripten cache due to read-only root')
CACHE = os.path.expanduser(os.path.join('~', '.emscripten_cache'))
if not PORTS:
PORTS = os.path.join(CACHE, 'ports')
if JAVA is None:
logger.debug('JAVA not defined in ' + config_file_location() + ', using "java"')
JAVA = 'java'
# Tools/paths
if LLVM_ADD_VERSION is None:
LLVM_ADD_VERSION = os.getenv('LLVM_ADD_VERSION')
if CLANG_ADD_VERSION is None:
CLANG_ADD_VERSION = os.getenv('CLANG_ADD_VERSION')
def parse_config_file():
"""Parse the emscripten config file using python's exec.
Also check EM_<KEY> environment variables to override specific config keys.
"""
config = {}
config_text = open(config_file, 'r').read() if config_file else EM_CONFIG
try:
exec(config_text, config)
except Exception as e:
exit_with_error('Error in evaluating %s (at %s): %s, text: %s', EM_CONFIG, config_file, str(e), config_text)
CONFIG_KEYS = (
'NODE_JS',
'BINARYEN_ROOT',
'SPIDERMONKEY_ENGINE',
'V8_ENGINE',
'LLVM_ROOT',
'LLVM_ADD_VERSION',
'CLANG_ADD_VERSION',
'CLOSURE_COMPILER',
'JAVA',
'JS_ENGINE',
'JS_ENGINES',
'WASMER',
'WASMTIME',
'WASM_ENGINES',
'FROZEN_CACHE',
'CACHE',
'PORTS',
'COMPILER_WRAPPER',
)
# Only propagate certain settings from the config file.
for key in CONFIG_KEYS:
env_var = 'EM_' + key
env_value = os.environ.get(env_var)
if env_value is not None:
globals()[key] = env_value
elif key in config:
globals()[key] = config[key]
# Certain keys are mandatory
for key in ('LLVM_ROOT', 'NODE_JS', 'BINARYEN_ROOT'):
if key not in config:
exit_with_error('%s is not defined in %s', key, config_file_location())
if not globals()[key]:
exit_with_error('%s is set to empty value in %s', key, config_file_location())
if not NODE_JS:
exit_with_error('NODE_JS is not defined in %s', config_file_location())
normalize_config_settings()
# Returns the location of the emscripten config file.
def config_file_location():
# Handle the case where there is no config file at all (i.e. if EM_CONFIG is passed as Python code
# directly on the command line).
if not config_file:
return '<inline config>'
return config_file
def generate_config(path, first_time=False):
# Note: repr is used to ensure the paths are escaped correctly on Windows.
# The full string is replaced so that the template stays valid Python.
config_file = open(path_from_root('tools', 'settings_template.py')).read().splitlines()
config_file = config_file[3:] # remove the initial comment
config_file = '\n'.join(config_file)
# autodetect some default paths
config_file = config_file.replace('\'{{{ EMSCRIPTEN_ROOT }}}\'', repr(__rootpath__))
llvm_root = os.path.dirname(which('llvm-dis') or '/usr/bin/llvm-dis')
config_file = config_file.replace('\'{{{ LLVM_ROOT }}}\'', repr(llvm_root))
node = which('nodejs') or which('node') or 'node'
config_file = config_file.replace('\'{{{ NODE }}}\'', repr(node))
abspath = os.path.abspath(os.path.expanduser(path))
# write
with open(abspath, 'w') as f:
f.write(config_file)
if first_time:
print('''
==============================================================================
Welcome to Emscripten!
This is the first time any of the Emscripten tools has been run.
A settings file has been copied to %s, at absolute path: %s
It contains our best guesses for the important paths, which are:
LLVM_ROOT = %s
NODE_JS = %s
EMSCRIPTEN_ROOT = %s
Please edit the file if any of those are incorrect.
This command will now exit. When you are done editing those paths, re-run it.
==============================================================================
''' % (path, abspath, llvm_root, node, __rootpath__), file=sys.stderr)
# Emscripten configuration is done through the --em-config command line option
# or the EM_CONFIG environment variable. If the specified string value contains
# newline or semicolon-separated definitions, then these definitions will be
# used to configure Emscripten. Otherwise, the string is understood to be a
# path to a settings file that contains the required definitions.
# The search order for the config file is as follows:
# 1. Specified on the command line (--em-config)
# 2. Specified via EM_CONFIG environment variable
# 3. Local .emscripten file, if found
# 4. Local .emscripten file, as used by `emsdk --embedded` (two levels above,
# see below)
# 5. User home directory config (~/.emscripten), if found.
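# As an illustration of the inline form handled further below (paths are placeholders,
# not recommended defaults), a semicolon-separated definition string such as
#   --em-config "LLVM_ROOT='/opt/llvm/bin';BINARYEN_ROOT='/opt/binaryen';NODE_JS='/usr/bin/node'"
# is converted to newline-separated definitions and later exec'd by parse_config_file,
# while a value containing no '=' is treated as a path to a config file instead.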
embedded_config = path_from_root('.emscripten')
# For compatibility with `emsdk --embedded` mode also look two levels up. The
# layout of the emsdk puts emcc two levels below emsdk. For example:
# - emsdk/upstream/emscripten/emcc
# - emsdk/emscripten/1.38.31/emcc
# However `emsdk --embedded` stores the config file in the emsdk root.
# Without this check, when emcc is run from within the emsdk in embedded mode
# and the user forgets to first run `emsdk_env.sh` (which sets EM_CONFIG) emcc
# will not see any config file at all and fall back to creating a new/empty
# one.
# We could remove this special case if emsdk were to write its embedded config
# file into the emscripten directory itself.
# See: https://github.com/emscripten-core/emsdk/pull/367
emsdk_root = os.path.dirname(os.path.dirname(path_from_root()))
emsdk_embedded_config = os.path.join(emsdk_root, '.emscripten')
user_home_config = os.path.expanduser('~/.emscripten')
if '--em-config' in sys.argv:
EM_CONFIG = sys.argv[sys.argv.index('--em-config') + 1]
# And now remove it from sys.argv
skip = False
newargs = []
for arg in sys.argv:
if not skip and arg != '--em-config':
newargs += [arg]
elif arg == '--em-config':
skip = True
elif skip:
skip = False
sys.argv = newargs
if not os.path.isfile(EM_CONFIG):
if EM_CONFIG.startswith('-'):
exit_with_error('Passed --em-config without an argument. Usage: --em-config /path/to/.emscripten or --em-config LLVM_ROOT=/path;...')
if '=' not in EM_CONFIG:
exit_with_error('File ' + EM_CONFIG + ' passed to --em-config does not exist!')
else:
EM_CONFIG = EM_CONFIG.replace(';', '\n') + '\n'
elif 'EM_CONFIG' in os.environ:
EM_CONFIG = os.environ['EM_CONFIG']
elif os.path.exists(embedded_config):
EM_CONFIG = embedded_config
elif os.path.exists(emsdk_embedded_config):
EM_CONFIG = emsdk_embedded_config
elif os.path.exists(user_home_config):
EM_CONFIG = user_home_config
else:
if root_is_writable():
generate_config(embedded_config, first_time=True)
else:
generate_config(user_home_config, first_time=True)
sys.exit(0)
if '\n' in EM_CONFIG:
config_file = None
logger.debug('config is specified inline without a file')
else:
config_file = os.path.expanduser(EM_CONFIG)
logger.debug('emscripten config is located in ' + config_file)
if not os.path.exists(config_file):
exit_with_error('emscripten config file not found: ' + config_file)
# Emscripten compiler spawns other processes, which can reimport shared.py, so
# make sure that those child processes get the same configuration file by
# setting it to the currently active environment.
os.environ['EM_CONFIG'] = EM_CONFIG
parse_config_file()
| 34.17301
| 139
| 0.709093
|
import os
import sys
import logging
from .utils import path_from_root, exit_with_error, __rootpath__, which
logger = logging.getLogger('shared')
EMSCRIPTEN_ROOT = __rootpath__
NODE_JS = None
BINARYEN_ROOT = None
SPIDERMONKEY_ENGINE = None
V8_ENGINE = None
LLVM_ROOT = None
LLVM_ADD_VERSION = None
CLANG_ADD_VERSION = None
CLOSURE_COMPILER = None
JAVA = None
JS_ENGINE = None
JS_ENGINES = None
WASMER = None
WASMTIME = None
WASM_ENGINES = []
FROZEN_CACHE = None
CACHE = None
PORTS = None
COMPILER_WRAPPER = None
def listify(x):
if type(x) is not list:
return [x]
return x
def fix_js_engine(old, new):
if old is None:
return
global JS_ENGINES
JS_ENGINES = [new if x == old else x for x in JS_ENGINES]
return new
def root_is_writable():
return os.access(__rootpath__, os.W_OK)
def normalize_config_settings():
global CACHE, PORTS, JAVA, LLVM_ADD_VERSION, CLANG_ADD_VERSION
global NODE_JS, V8_ENGINE, JS_ENGINE, JS_ENGINES, SPIDERMONKEY_ENGINE, WASM_ENGINES
if not JS_ENGINES:
JS_ENGINES = [NODE_JS]
if not JS_ENGINE:
JS_ENGINE = JS_ENGINES[0]
if SPIDERMONKEY_ENGINE:
new_spidermonkey = SPIDERMONKEY_ENGINE
if '-w' not in str(new_spidermonkey):
new_spidermonkey += ['-w']
SPIDERMONKEY_ENGINE = fix_js_engine(SPIDERMONKEY_ENGINE, new_spidermonkey)
NODE_JS = fix_js_engine(NODE_JS, listify(NODE_JS))
V8_ENGINE = fix_js_engine(V8_ENGINE, listify(V8_ENGINE))
JS_ENGINE = fix_js_engine(JS_ENGINE, listify(JS_ENGINE))
JS_ENGINES = [listify(engine) for engine in JS_ENGINES]
WASM_ENGINES = [listify(engine) for engine in WASM_ENGINES]
if not CACHE:
if root_is_writable():
CACHE = path_from_root('cache')
else:
# if the emscripten root is not writable.
# This is useful mostly for read-only installation and perhaps could
# be removed in the future since such installations should probably be
# setting a specific cache location.
logger.debug('Using home-directory for emscripten cache due to read-only root')
CACHE = os.path.expanduser(os.path.join('~', '.emscripten_cache'))
if not PORTS:
PORTS = os.path.join(CACHE, 'ports')
if JAVA is None:
logger.debug('JAVA not defined in ' + config_file_location() + ', using "java"')
JAVA = 'java'
# Tools/paths
if LLVM_ADD_VERSION is None:
LLVM_ADD_VERSION = os.getenv('LLVM_ADD_VERSION')
if CLANG_ADD_VERSION is None:
CLANG_ADD_VERSION = os.getenv('CLANG_ADD_VERSION')
def parse_config_file():
config = {}
config_text = open(config_file, 'r').read() if config_file else EM_CONFIG
try:
exec(config_text, config)
except Exception as e:
exit_with_error('Error in evaluating %s (at %s): %s, text: %s', EM_CONFIG, config_file, str(e), config_text)
CONFIG_KEYS = (
'NODE_JS',
'BINARYEN_ROOT',
'SPIDERMONKEY_ENGINE',
'V8_ENGINE',
'LLVM_ROOT',
'LLVM_ADD_VERSION',
'CLANG_ADD_VERSION',
'CLOSURE_COMPILER',
'JAVA',
'JS_ENGINE',
'JS_ENGINES',
'WASMER',
'WASMTIME',
'WASM_ENGINES',
'FROZEN_CACHE',
'CACHE',
'PORTS',
'COMPILER_WRAPPER',
)
# Only propagate certain settings from the config file.
for key in CONFIG_KEYS:
env_var = 'EM_' + key
env_value = os.environ.get(env_var)
if env_value is not None:
globals()[key] = env_value
elif key in config:
globals()[key] = config[key]
# Certain keys are mandatory
for key in ('LLVM_ROOT', 'NODE_JS', 'BINARYEN_ROOT'):
if key not in config:
exit_with_error('%s is not defined in %s', key, config_file_location())
if not globals()[key]:
exit_with_error('%s is set to empty value in %s', key, config_file_location())
if not NODE_JS:
exit_with_error('NODE_JS is not defined in %s', config_file_location())
normalize_config_settings()
# Returns the location of the emscripten config file.
def config_file_location():
# Handle the case where there is no config file at all (i.e. if EM_CONFIG is passed as Python code
# directly on the command line).
if not config_file:
return '<inline config>'
return config_file
def generate_config(path, first_time=False):
# Note: repr is used to ensure the paths are escaped correctly on Windows.
# The full string is replaced so that the template stays valid Python.
config_file = open(path_from_root('tools', 'settings_template.py')).read().splitlines()
config_file = config_file[3:] # remove the initial comment
config_file = '\n'.join(config_file)
# autodetect some default paths
config_file = config_file.replace('\'{{{ EMSCRIPTEN_ROOT }}}\'', repr(__rootpath__))
llvm_root = os.path.dirname(which('llvm-dis') or '/usr/bin/llvm-dis')
config_file = config_file.replace('\'{{{ LLVM_ROOT }}}\'', repr(llvm_root))
node = which('nodejs') or which('node') or 'node'
config_file = config_file.replace('\'{{{ NODE }}}\'', repr(node))
abspath = os.path.abspath(os.path.expanduser(path))
# write
with open(abspath, 'w') as f:
f.write(config_file)
if first_time:
print('''
==============================================================================
Welcome to Emscripten!
This is the first time any of the Emscripten tools has been run.
A settings file has been copied to %s, at absolute path: %s
It contains our best guesses for the important paths, which are:
LLVM_ROOT = %s
NODE_JS = %s
EMSCRIPTEN_ROOT = %s
Please edit the file if any of those are incorrect.
This command will now exit. When you are done editing those paths, re-run it.
==============================================================================
''' % (path, abspath, llvm_root, node, __rootpath__), file=sys.stderr)
# Emscripten configuration is done through the --em-config command line option
# or the EM_CONFIG environment variable. If the specified string value contains
# newline or semicolon-separated definitions, then these definitions will be
# used to configure Emscripten. Otherwise, the string is understood to be a
# path to a settings file that contains the required definitions.
# The search order for the config file is as follows:
# 1. Specified on the command line (--em-config)
# 2. Specified via EM_CONFIG environment variable
# 3. Local .emscripten file, if found
# 4. Local .emscripten file, as used by `emsdk --embedded` (two levels above,
# see below)
# 5. User home directory config (~/.emscripten), if found.
embedded_config = path_from_root('.emscripten')
# For compatibility with `emsdk --embedded` mode also look two levels up. The
# layout of the emsdk puts emcc two levels below emsdk. For example:
# - emsdk/upstream/emscripten/emcc
# - emsdk/emscripten/1.38.31/emcc
# However `emsdk --embedded` stores the config file in the emsdk root.
# Without this check, when emcc is run from within the emsdk in embedded mode
# and the user forgets to first run `emsdk_env.sh` (which sets EM_CONFIG) emcc
# will not see any config file at all and fall back to creating a new/empty
# one.
# We could remove this special case if emsdk were to write its embedded config
# file into the emscripten directory itself.
# See: https://github.com/emscripten-core/emsdk/pull/367
emsdk_root = os.path.dirname(os.path.dirname(path_from_root()))
emsdk_embedded_config = os.path.join(emsdk_root, '.emscripten')
user_home_config = os.path.expanduser('~/.emscripten')
if '--em-config' in sys.argv:
EM_CONFIG = sys.argv[sys.argv.index('--em-config') + 1]
# And now remove it from sys.argv
skip = False
newargs = []
for arg in sys.argv:
if not skip and arg != '--em-config':
newargs += [arg]
elif arg == '--em-config':
skip = True
elif skip:
skip = False
sys.argv = newargs
if not os.path.isfile(EM_CONFIG):
if EM_CONFIG.startswith('-'):
exit_with_error('Passed --em-config without an argument. Usage: --em-config /path/to/.emscripten or --em-config LLVM_ROOT=/path;...')
if '=' not in EM_CONFIG:
exit_with_error('File ' + EM_CONFIG + ' passed to --em-config does not exist!')
else:
EM_CONFIG = EM_CONFIG.replace(';', '\n') + '\n'
elif 'EM_CONFIG' in os.environ:
EM_CONFIG = os.environ['EM_CONFIG']
elif os.path.exists(embedded_config):
EM_CONFIG = embedded_config
elif os.path.exists(emsdk_embedded_config):
EM_CONFIG = emsdk_embedded_config
elif os.path.exists(user_home_config):
EM_CONFIG = user_home_config
else:
if root_is_writable():
generate_config(embedded_config, first_time=True)
else:
generate_config(user_home_config, first_time=True)
sys.exit(0)
if '\n' in EM_CONFIG:
config_file = None
logger.debug('config is specified inline without a file')
else:
config_file = os.path.expanduser(EM_CONFIG)
logger.debug('emscripten config is located in ' + config_file)
if not os.path.exists(config_file):
exit_with_error('emscripten config file not found: ' + config_file)
# Emscripten compiler spawns other processes, which can reimport shared.py, so
# make sure that those child processes get the same configuration file by
# setting it to the currently active environment.
os.environ['EM_CONFIG'] = EM_CONFIG
parse_config_file()
| true
| true
|
f7055f5a813d438b1a9eb2a7915b1bbd2c8e55ef
| 678
|
py
|
Python
|
redirink/users/models.py
|
Egor4ik325/redirink
|
17ef85f48145ee6112f2fcbab60dcd9d65ba78bf
|
[
"MIT"
] | null | null | null |
redirink/users/models.py
|
Egor4ik325/redirink
|
17ef85f48145ee6112f2fcbab60dcd9d65ba78bf
|
[
"MIT"
] | null | null | null |
redirink/users/models.py
|
Egor4ik325/redirink
|
17ef85f48145ee6112f2fcbab60dcd9d65ba78bf
|
[
"MIT"
] | 1
|
2021-12-31T00:46:31.000Z
|
2021-12-31T00:46:31.000Z
|
from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
"""Default user for Redirink."""
#: First and last name do not cover name patterns around the globe
name = CharField(_("Name of User"), blank=True, max_length=255)
first_name = None # type: ignore
last_name = None # type: ignore
def get_absolute_url(self):
"""Get url for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"username": self.username})
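# Usage sketch (hypothetical wiring, not part of this file): with this model registered
# as the project's user model and a matching "users:detail" URL pattern,
#   AUTH_USER_MODEL = "users.User"      # settings.py
#   user.get_absolute_url()             # -> e.g. "/users/<username>/"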
| 29.478261
| 74
| 0.682891
|
from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
name = CharField(_("Name of User"), blank=True, max_length=255)
first_name = None
last_name = None
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
| true
| true
|
f7056011dbb1ac21708cb6fd697c34c2e5888adc
| 20,242
|
py
|
Python
|
generate_eval_file.py
|
JRC1995/SocialMediaNER
|
236b22ded48f64516ebf0577c3b9d9d907db84e0
|
[
"MIT"
] | null | null | null |
generate_eval_file.py
|
JRC1995/SocialMediaNER
|
236b22ded48f64516ebf0577c3b9d9d907db84e0
|
[
"MIT"
] | null | null | null |
generate_eval_file.py
|
JRC1995/SocialMediaNER
|
236b22ded48f64516ebf0577c3b9d9d907db84e0
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
from dataLoader.batch import batcher
from transformers import BertTokenizerFast, ElectraTokenizerFast
from configs.WNUT_configs import *
from utils.ml_utils import *
from utils.data_utils import *
from utils.metric_utils import *
import argparse
from tqdm import tqdm
from pathlib import Path
import os
import torch as T
import torch.nn as nn
from models.BigTransformerTagger import BigTransformerTagger
from models.CSETagger import CSETagger
from models.layers.BigTransformers.BERT import BertModel
from models.layers.BigTransformers.ELECTRA import ElectraModel
from models.cse_generator import CSEGenerator
import json
import sys
import re
"""
FUTURE STUFF TO KEEP IN MIND:
"""
"""
TRY SAVE BY LOSS IN THE FUTURE
"""
"""
IN FUTURE CHECK IF KEEPING TRUE CASES HARMS OR HELPS BERT
"""
"""
CHECK WORD 2 VEC OOV STUFF
"""
"""
CHECK CLASS WEIGHING
"""
"""
CHECK FOR QA CHECK WITHOUT NEGATIVE EXAMPLES
"""
"""
CHECK FOR QA IN FULL MODE
"""
"""
IMPORT MODEL HERE
"""
"""
FIX LSTM AND TRY ORDERED MEMORY AND GCDT AND STUFFS
"""
device = T.device('cuda' if T.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='Model Name and stuff')
parser.add_argument('--model', type=str, default="ELECTRA_extra_BiLSTM_CRF",
choices=["BERT",
"BERT_CRF",
"BERT_BiLSTM_CRF",
"BERT_w2v_BiLSTM_CRF",
"BERT_extra_BiLSTM_CRF",
"ELECTRA",
"ELECTRA_CRF",
"ELECTRA_fine_tune_CRF",
"ELECTRA_BiLSTM_CRF",
"ELECTRA_w2v_BiLSTM_CRF",
"ELECTRA_extra_BiLSTM_CRF",
"ELECTRA_extra_CRF",
"ELECTRA_extra",
"ELECTRA_w2v_extra_BiLSTM_CRF",
"ELECTRA_extra_BiLSTM_DSC",
"CSE",
"CSE_CRF",
"CSE_BiLSTM_CRF",
"CSE_w2v_BiLSTM_CRF",
"CSE_w2v_extra_BiLSTM_CRF",
"CSE_extra_BiLSTM_CRF"])
parser.add_argument('--dataset', type=str, default="WNUT_2017")
parser.add_argument('--display_step', type=int, default=30)
parser.add_argument('--lr', type=float, default=-1)
parser.add_argument('--fine_tune_lr', type=float, default=-1)
parser.add_argument('--times', type=int, default=1)
parser.add_argument('--mixed_case_training', type=str, default="no",
choices=["yes", "no"])
flags = parser.parse_args()
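# Example invocation (the values shown are just the defaults declared above):
#   python generate_eval_file.py --model ELECTRA_extra_BiLSTM_CRF --dataset WNUT_2017 --times 1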
SEED_base_value = 101
"""
CREATE MAPPINGS HERE
"""
if re.match("^BERT|^ELECTRA", flags.model):
model_dict = {flags.model: BigTransformerTagger}
elif re.match("^CSE", flags.model):
model_dict = {flags.model: CSETagger}
else:
raise ValueError("Invalid model")
config_dict = {flags.model: eval("{0}_config".format(flags.model))}
"""
model_dict = {'BERT': BigTransformerTagger,
'ELECTRA': BigTransformerTagger,
'ELECTRA_CRF': BigTransformerTagger,
"ELECTRA_BiLSTM_CRF": BigTransformerTagger,
'ELECTRA_w2v_BiLSTM_CRF': BigTransformerTagger,
"ELECTRA_w2v_extra_BiLSTM_CRF": BigTransformerTagger,
"ELECTRA_extra_BiLSTM_CRF": BigTransformerTagger,
"ELECTRA_extra": BigTransformerTagger,
"ELECTRA_extra_CRF": BigTransformerTagger}
config_dict = {'BERT': BERT_config,
'ELECTRA': ELECTRA_config,
'ELECTRA_CRF': ELECTRA_CRF_config,
"ELECTRA_BiLSTM_CRF": ELECTRA_BiLSTM_CRF_config,
'ELECTRA_w2v_BiLSTM_CRF': ELECTRA_w2v_BiLSTM_CRF_config,
'ELECTRA_w2v_extra_BiLSTM_CRF': ELECTRA_w2v_extra_BiLSTM_CRF_config,
"ELECTRA_extra_BiLSTM_CRF": ELECTRA_extra_BiLSTM_CRF_config,
"ELECTRA_extra": ELECTRA_extra_config,
"ELECTRA_extra_CRF": ELECTRA_extra_CRF_config}
"""
config = config_dict[flags.model]
config = config()
if flags.lr >= 0:
config.lr = flags.lr
if flags.fine_tune_lr >= 0:
config.fine_tune_lr = flags.fine_tune_lr
display_step = flags.display_step
print('Dataset: {}'.format(flags.dataset))
print("Model Name: {}".format(flags.model))
print("Total Runs: {}".format(flags.times))
print("Learning Rate: {}".format(config.lr))
print("Fine-Tune Learning Rate: {}".format(config.fine_tune_lr))
print("Mixed-Case Training: {}".format(flags.mixed_case_training))
print("Display Step: {}".format(flags.display_step))
print("SEED base value: {}".format(SEED_base_value))
common_data_path = "processed_data/{}/vocab_and_embd.pkl".format(flags.dataset)
if flags.mixed_case_training.lower() == "no":
train_data_path = "processed_data/{}/train_data.json".format(flags.dataset)
else:
train_data_path = "processed_data/{}/train_mixed_data.json".format(flags.dataset)
dev_data_path = "processed_data/{}/dev_data.json".format(flags.dataset)
test_data_path = "processed_data/{}/test_data.json".format(flags.dataset)
checkpoint_directory = "saved_params/{}/".format(flags.dataset)
Path(checkpoint_directory).mkdir(parents=True, exist_ok=True)
Path("output/").mkdir(parents=True, exist_ok=True)
log_directory = os.path.join("logs", "{}".format(flags.dataset))
Path(log_directory).mkdir(parents=True, exist_ok=True)
keys = ['labels2idx', 'segment_labels2idx',
'w2v_vocab2idx', 'ft_vocab2idx', 'ipa2idx', 'pos2idx',
'w2v_embeddings', 'ft_embeddings']
labels2idx, segment_labels2idx,\
w2v_vocab2idx, ft_vocab2idx, ipa2idx, pos2idx, \
w2v_embeddings, ft_embeddings = load_data(common_data_path, 'rb', 'pickle', keys=keys)
idx2labels = {v: k for k, v in labels2idx.items()}
"""
DETERMINES WHAT TO LOAD AND IN WHICH ORDER. NEEDS TO MAKE CHANGES IF YOU WANT TO LOAD SOMETHING ELSE
"""
keys = ["sequence",
"w2v_feats", "fasttext_feats",
"pos_tags",
"ipa_feats", "phono_feats",
"labels", "segment_labels"]
"""
sequence = variable length natural language sequences
w2v_feats = variable length sequences in int format where each int id corresponds to a word2vec vector (mapped to a word in w2v_vocab2idx)
fasttext_feats = same as above but for fasttext
pos_tags = same as above but each int id corresponds to the pos tag of the corresponding word. the id is associated with pos2idx (mapping between id and pos tags). Need to create random embeddings for pos tags.
ipa_feats = character level features, padded and batched to batch_size x sequence_len x word_len. int format where each id corresponds to a specific ipa symbol in the ipa2idx mapping. Need to create a randomly initialized embedding.
phono_feats = same as above but each character is represented as a float vector of 22 dimensions instead (can be directly treated as char-level embeddings)
labels = variable length sequence labels for the corresponding sequences. int format. each id corresponds to a particular label (mapping in labels2idx)
segment_labels = we can ignore it for now. Can later be used for multi-task training on the entity-segmentation task (where we do not predict the type of the entity, just the boundaries)
"""
"""
For more about load_data see: utils/data_utils.py
"""
train_sample_tuples = load_data(train_data_path, 'r', 'json', keys=keys)
val_sample_tuples = load_data(dev_data_path, 'r', 'json', keys=keys)
test_sample_tuples = load_data(test_data_path, 'r', 'json', keys=keys)
MAX_CHAR_LEN = len(train_sample_tuples[4][0][0])
IPA_PAD = [0]*MAX_CHAR_LEN
PHONO_PAD = [0]*config.phono_feats_dim
PHONO_PAD = [PHONO_PAD]*MAX_CHAR_LEN
if "bert" in flags.model.lower() or "electra" in flags.model.lower():
if "bert" in flags.model.lower():
BigModel = BertModel.from_pretrained(config.embedding_path,
output_hidden_states=True,
output_attentions=False)
tokenizer = BertTokenizerFast.from_pretrained(config.embedding_path,
output_hidden_states=True,
output_attentions=False)
elif "electra" in flags.model.lower():
BigModel = ElectraModel.from_pretrained(config.embedding_path,
output_hidden_states=True,
output_attentions=False)
tokenizer = ElectraTokenizerFast.from_pretrained(config.embedding_path,
output_hidden_states=True,
output_attentions=False)
pad_types = [None, w2v_vocab2idx['<pad>'], ft_vocab2idx['<pad>'],
pos2idx['G'], IPA_PAD, PHONO_PAD, labels2idx["O"], segment_labels2idx["O"]]
else:
cse_gen = CSEGenerator(config.use_forward, config.use_backward)
tokenizer = None
"""
Probably need to do nothing for CSE here
text sequences will not be padded (can be padded later after embedding)
will need to change things if using precomputed embeddings
"""
pad_types = [None, w2v_vocab2idx['<pad>'], ft_vocab2idx['<pad>'],
pos2idx['G'], IPA_PAD, PHONO_PAD, labels2idx["O"], segment_labels2idx["O"]]
def run(time, display_params=False):
global model_dict
global flags
global config
global device
global checkpoint_directory, log_directory
global BigModel
global w2v_embeddings, ft_embeddings
global ft_vocab2idx, w2v_vocab2idx, pos2idx, ipa2idx, labels2idx
mixed_string = "" if flags.mixed_case_training.lower() == "no" else "mixed_case_"
checkpoint_path = os.path.join(
checkpoint_directory, "{}_{}run{}.pt".format(flags.model, mixed_string, time))
log_path = os.path.join(log_directory,
"{}_{}run{}.json".format(flags.model, mixed_string, time))
# print(checkpoint_path)
# print("Model: {}".format(config.model_name))
NamedEntitiyRecognizer = model_dict[flags.model]
"""
May need to make changes here and may be some conditional statements
"""
if 'bert' in flags.model.lower() or 'electra' in flags.model.lower():
if config.use_w2v:
classic_embeddings = w2v_embeddings
word_pad_id = w2v_vocab2idx['<pad>']
elif config.use_fasttext:
classic_embeddings = ft_embeddings
word_pad_id = ft_vocab2idx['<pad>']
else:
classic_embeddings = None
word_pad_id = None
if config.use_pos_tags:
pos_vocab_size = len(pos2idx)
else:
pos_vocab_size = None
if config.use_char_feats:
ipa_vocab_size = len(ipa2idx)
else:
ipa_vocab_size = None
model = NamedEntitiyRecognizer(BigTransformer=BigModel,
classes_num=len(labels2idx),
negative_index=labels2idx['O'],
config=config,
device=device,
classic_embeddings=classic_embeddings,
word_pad_id=word_pad_id,
pos_vocab_size=pos_vocab_size,
ipa_vocab_size=ipa_vocab_size)
else:
"""
Put CSE code here
"""
if config.use_w2v:
classic_embeddings = w2v_embeddings
word_pad_id = w2v_vocab2idx['<pad>']
elif config.use_fasttext:
classic_embeddings = ft_embeddings
word_pad_id = ft_vocab2idx['<pad>']
else:
classic_embeddings = None
word_pad_id = None
if config.use_pos_tags:
pos_vocab_size = len(pos2idx)
else:
pos_vocab_size = None
if config.use_char_feats:
ipa_vocab_size = len(ipa2idx)
else:
ipa_vocab_size = None
model = NamedEntitiyRecognizer(cse_gen,
classes_num=len(labels2idx),
config=config,
device=device,
classic_embeddings=classic_embeddings,
word_pad_id=word_pad_id,
ipa_vocab_size=ipa_vocab_size,
pos_vocab_size=pos_vocab_size)
model = model.to(device)
parameters = [p for p in model.parameters() if p.requires_grad]
parameter_count = param_count(parameters)
print("\n\nParameter Count: {}\n\n".format(parameter_count))
if display_params:
param_display_fn(model)
print("RUN: {}\n\n".format(time))
run_epochs(model, config, checkpoint_path, log_path)
def run_epochs(model, config, checkpoint_path, log_path):
"""
raise ValueError(
"Have you remembered to save the whole epoch log? (both dump output and in a dict)")
"""
global train_sample_tuples, val_sample_tuples, test_sample_tuples
train_actual_iters = count_actual_iterations(train_sample_tuples[0], config)
val_actual_iters = count_actual_iterations(val_sample_tuples[0], config)
test_actual_iters = count_actual_iterations(test_sample_tuples[0], config)
train_effective_iters = count_effective_iterations(train_sample_tuples[0], config)
val_effective_iters = count_effective_iterations(val_sample_tuples[0], config)
test_effective_iters = count_effective_iterations(test_sample_tuples[0], config)
# print(train_iters)
optimizer = load_LRangerMod(model,
config=config) # name is misleading; this just runs AdamW now
print('Loading pre-trained weights for the model...')
checkpoint = T.load(checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
print('\nRESTORATION COMPLETE\n')
optimizer.zero_grad()
# with tqdm(total=config.epochs-past_epoch, desc='Epoch', position=0) as pbar:
print("TESTING\n")
test_loss, test_F1 = run_batches(test_sample_tuples,
epoch=0,
model=model,
optimizer=optimizer,
config=config,
generator_len=test_actual_iters,
train=False,
desc='Test Batch')
# print(test_F1)
def run_batches(sample_tuples, epoch,
model, optimizer, config,
generator_len,
train=True, scheduler=None,
desc=None):
global display_step
global pad_types
global tokenizer
global idx2labels
global flags
accu_step = config.total_batch_size//config.train_batch_size
if desc is None:
desc = 'Batch'
losses = []
F1s = []
total_tp = 0
total_pred_len = 0
total_gold_len = 0
# copy_tuples = copy.deepcopy(sample_tuples)
f = open("output/out_{}.txt".format(flags.model), "w")
f.write('')
f.close()
with tqdm(total=generator_len, desc=desc, position=0) as pbar:
i = 0
for batch, batch_masks in batcher(sample_tuples,
pad_types,
config.train_batch_size,
sort_by_idx=1):
# pbar = tqdm(total=generator_len, desc='Batch', position=0)
batch_texts = batch[0]
batch_w2v_idx = batch[1]
batch_ft_idx = batch[2]
batch_pos_idx = batch[3]
batch_ipa_idx = batch[4]
batch_phono = batch[5]
batch_labels = batch[6]
batch_segment_labels = batch[7]
batch_mask = batch_masks[1]
"""
IMPLEMENT INSIDE utils/ml_utils.py
"""
predictions, loss = predict_NER(model=model,
tokenizer=tokenizer,
batch_texts=batch_texts,
batch_w2v_idx=batch_w2v_idx,
batch_ft_idx=batch_ft_idx,
batch_pos_idx=batch_pos_idx,
batch_ipa_idx=batch_ipa_idx,
batch_phono=batch_phono,
batch_labels=batch_labels,
batch_segment_labels=batch_segment_labels,
batch_mask=batch_mask,
device=device,
config=config,
train=train)
losses.append(loss.item())
if train:
loss = loss/accu_step
loss.backward()
if (i+1) % accu_step == 0: # Update accumulated gradients
T.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
tp, pred_len, gold_len = eval_stats(predictions,
batch_labels,
batch_mask,
idx2labels)
prec, rec, F1 = compute_F1(tp, pred_len, gold_len)
F1s.append(F1)
if i % display_step == 0:
pbar.write("Model: {}, Epoch: {:3d}, Iter: {:5d}, ".format(config.model_name, epoch, i) +
"Loss: {:.3f}, F1: {:.3f}".format(loss, F1))
else:
f = open("output/out_{}.txt".format(flags.model), "a")
for prediction_sample, gold_sample, mask in zip(predictions, batch_labels, batch_mask):
true_seq_len = sum(mask)
prediction_sample = prediction_sample[0:true_seq_len]
gold_sample = gold_sample[0:true_seq_len]
for pred, gold in zip(prediction_sample, gold_sample):
f.write("test NNP "+str(idx2labels[gold])+" "+str(idx2labels[pred])+"\n")
f.close()
tp, pred_len, gold_len = eval_stats(predictions,
batch_labels,
batch_mask,
idx2labels)
prec, rec, F1 = compute_F1(tp, pred_len, gold_len)
total_tp += tp
total_pred_len += pred_len
total_gold_len += gold_len
if i % display_step == 0:
pbar.write("Model: {}, Epoch: {:3d}, Iter: {:5d}, ".format(config.model_name, epoch, i) +
"Loss: {:.3f}".format(loss))
i += 1
pbar.update(1)
# print("generator_len", generator_len)
# print("i", i)
print("\n\n")
if train:
F1 = np.mean(F1s)
else:
prec, rec, F1 = compute_F1(total_tp, total_pred_len, total_gold_len)
# del copy_tuples
return np.mean(losses), F1
if __name__ == '__main__':
time = 0
while time < flags.times:
if time == 0:
"""
time_str = input("\nStarting time (0,1,2.....times): ")
try:
time = int(time_str)
except:
time = 0
"""
time = 0
SEED = SEED_base_value+time
T.manual_seed(SEED)
random.seed(SEED)
T.backends.cudnn.deterministic = True
T.backends.cudnn.benchmark = False
np.random.seed(SEED)
run(time, display_params=True)
time += 1
| 35.953819
| 231
| 0.579636
|
import numpy as np
import random
from dataLoader.batch import batcher
from transformers import BertTokenizerFast, ElectraTokenizerFast
from configs.WNUT_configs import *
from utils.ml_utils import *
from utils.data_utils import *
from utils.metric_utils import *
import argparse
from tqdm import tqdm
from pathlib import Path
import os
import torch as T
import torch.nn as nn
from models.BigTransformerTagger import BigTransformerTagger
from models.CSETagger import CSETagger
from models.layers.BigTransformers.BERT import BertModel
from models.layers.BigTransformers.ELECTRA import ElectraModel
from models.cse_generator import CSEGenerator
import json
import sys
import re
device = T.device('cuda' if T.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='Model Name and stuff')
parser.add_argument('--model', type=str, default="ELECTRA_extra_BiLSTM_CRF",
choices=["BERT",
"BERT_CRF",
"BERT_BiLSTM_CRF",
"BERT_w2v_BiLSTM_CRF",
"BERT_extra_BiLSTM_CRF",
"ELECTRA",
"ELECTRA_CRF",
"ELECTRA_fine_tune_CRF",
"ELECTRA_BiLSTM_CRF",
"ELECTRA_w2v_BiLSTM_CRF",
"ELECTRA_extra_BiLSTM_CRF",
"ELECTRA_extra_CRF",
"ELECTRA_extra",
"ELECTRA_w2v_extra_BiLSTM_CRF",
"ELECTRA_extra_BiLSTM_DSC",
"CSE",
"CSE_CRF",
"CSE_BiLSTM_CRF",
"CSE_w2v_BiLSTM_CRF",
"CSE_w2v_extra_BiLSTM_CRF",
"CSE_extra_BiLSTM_CRF"])
parser.add_argument('--dataset', type=str, default="WNUT_2017")
parser.add_argument('--display_step', type=int, default=30)
parser.add_argument('--lr', type=float, default=-1)
parser.add_argument('--fine_tune_lr', type=float, default=-1)
parser.add_argument('--times', type=int, default=1)
parser.add_argument('--mixed_case_training', type=str, default="no",
choices=["yes", "no"])
flags = parser.parse_args()
SEED_base_value = 101
if re.match("^BERT|^ELECTRA", flags.model):
model_dict = {flags.model: BigTransformerTagger}
elif re.match("^CSE", flags.model):
model_dict = {flags.model: CSETagger}
else:
raise ValueError("Invalid model")
config_dict = {flags.model: eval("{0}_config".format(flags.model))}
config = config_dict[flags.model]
config = config()
if flags.lr >= 0:
config.lr = flags.lr
if flags.fine_tune_lr >= 0:
config.fine_tune_lr = flags.fine_tune_lr
display_step = flags.display_step
print('Dataset: {}'.format(flags.dataset))
print("Model Name: {}".format(flags.model))
print("Total Runs: {}".format(flags.times))
print("Learning Rate: {}".format(config.lr))
print("Fine-Tune Learning Rate: {}".format(config.fine_tune_lr))
print("Mixed-Case Training: {}".format(flags.mixed_case_training))
print("Display Step: {}".format(flags.display_step))
print("SEED base value: {}".format(SEED_base_value))
common_data_path = "processed_data/{}/vocab_and_embd.pkl".format(flags.dataset)
if flags.mixed_case_training.lower() == "no":
train_data_path = "processed_data/{}/train_data.json".format(flags.dataset)
else:
train_data_path = "processed_data/{}/train_mixed_data.json".format(flags.dataset)
dev_data_path = "processed_data/{}/dev_data.json".format(flags.dataset)
test_data_path = "processed_data/{}/test_data.json".format(flags.dataset)
checkpoint_directory = "saved_params/{}/".format(flags.dataset)
Path(checkpoint_directory).mkdir(parents=True, exist_ok=True)
Path("output/").mkdir(parents=True, exist_ok=True)
log_directory = os.path.join("logs", "{}".format(flags.dataset))
Path(log_directory).mkdir(parents=True, exist_ok=True)
keys = ['labels2idx', 'segment_labels2idx',
'w2v_vocab2idx', 'ft_vocab2idx', 'ipa2idx', 'pos2idx',
'w2v_embeddings', 'ft_embeddings']
labels2idx, segment_labels2idx,\
w2v_vocab2idx, ft_vocab2idx, ipa2idx, pos2idx, \
w2v_embeddings, ft_embeddings = load_data(common_data_path, 'rb', 'pickle', keys=keys)
idx2labels = {v: k for k, v in labels2idx.items()}
keys = ["sequence",
"w2v_feats", "fasttext_feats",
"pos_tags",
"ipa_feats", "phono_feats",
"labels", "segment_labels"]
train_sample_tuples = load_data(train_data_path, 'r', 'json', keys=keys)
val_sample_tuples = load_data(dev_data_path, 'r', 'json', keys=keys)
test_sample_tuples = load_data(test_data_path, 'r', 'json', keys=keys)
MAX_CHAR_LEN = len(train_sample_tuples[4][0][0])
IPA_PAD = [0]*MAX_CHAR_LEN
PHONO_PAD = [0]*config.phono_feats_dim
PHONO_PAD = [PHONO_PAD]*MAX_CHAR_LEN
if "bert" in flags.model.lower() or "electra" in flags.model.lower():
if "bert" in flags.model.lower():
BigModel = BertModel.from_pretrained(config.embedding_path,
output_hidden_states=True,
output_attentions=False)
tokenizer = BertTokenizerFast.from_pretrained(config.embedding_path,
output_hidden_states=True,
output_attentions=False)
elif "electra" in flags.model.lower():
BigModel = ElectraModel.from_pretrained(config.embedding_path,
output_hidden_states=True,
output_attentions=False)
tokenizer = ElectraTokenizerFast.from_pretrained(config.embedding_path,
output_hidden_states=True,
output_attentions=False)
pad_types = [None, w2v_vocab2idx['<pad>'], ft_vocab2idx['<pad>'],
pos2idx['G'], IPA_PAD, PHONO_PAD, labels2idx["O"], segment_labels2idx["O"]]
else:
cse_gen = CSEGenerator(config.use_forward, config.use_backward)
tokenizer = None
"""
Probably need to do nothing for CSE here
text sequences will not be padded (can be padded later after embedding)
will need to change things if using precomputed embeddings
"""
pad_types = [None, w2v_vocab2idx['<pad>'], ft_vocab2idx['<pad>'],
pos2idx['G'], IPA_PAD, PHONO_PAD, labels2idx["O"], segment_labels2idx["O"]]
def run(time, display_params=False):
global model_dict
global flags
global config
global device
global checkpoint_directory, log_directory
global BigModel
global w2v_embeddings, ft_embeddings
global ft_vocab2idx, w2v_vocab2idx, pos2idx, ipa2idx, labels2idx
mixed_string = "" if flags.mixed_case_training.lower() == "no" else "mixed_case_"
checkpoint_path = os.path.join(
checkpoint_directory, "{}_{}run{}.pt".format(flags.model, mixed_string, time))
log_path = os.path.join(log_directory,
"{}_{}run{}.json".format(flags.model, mixed_string, time))
NamedEntitiyRecognizer = model_dict[flags.model]
if 'bert' in flags.model.lower() or 'electra' in flags.model.lower():
if config.use_w2v:
classic_embeddings = w2v_embeddings
word_pad_id = w2v_vocab2idx['<pad>']
elif config.use_fasttext:
classic_embeddings = ft_embeddings
word_pad_id = ft_vocab2idx['<pad>']
else:
classic_embeddings = None
word_pad_id = None
if config.use_pos_tags:
pos_vocab_size = len(pos2idx)
else:
pos_vocab_size = None
if config.use_char_feats:
ipa_vocab_size = len(ipa2idx)
else:
ipa_vocab_size = None
model = NamedEntitiyRecognizer(BigTransformer=BigModel,
classes_num=len(labels2idx),
negative_index=labels2idx['O'],
config=config,
device=device,
classic_embeddings=classic_embeddings,
word_pad_id=word_pad_id,
pos_vocab_size=pos_vocab_size,
ipa_vocab_size=ipa_vocab_size)
else:
"""
Put CSE code here
"""
if config.use_w2v:
classic_embeddings = w2v_embeddings
word_pad_id = w2v_vocab2idx['<pad>']
elif config.use_fasttext:
classic_embeddings = ft_embeddings
word_pad_id = ft_vocab2idx['<pad>']
else:
classic_embeddings = None
word_pad_id = None
if config.use_pos_tags:
pos_vocab_size = len(pos2idx)
else:
pos_vocab_size = None
if config.use_char_feats:
ipa_vocab_size = len(ipa2idx)
else:
ipa_vocab_size = None
model = NamedEntitiyRecognizer(cse_gen,
classes_num=len(labels2idx),
config=config,
device=device,
classic_embeddings=classic_embeddings,
word_pad_id=word_pad_id,
ipa_vocab_size=ipa_vocab_size,
pos_vocab_size=pos_vocab_size)
model = model.to(device)
parameters = [p for p in model.parameters() if p.requires_grad]
parameter_count = param_count(parameters)
print("\n\nParameter Count: {}\n\n".format(parameter_count))
if display_params:
param_display_fn(model)
print("RUN: {}\n\n".format(time))
run_epochs(model, config, checkpoint_path, log_path)
def run_epochs(model, config, checkpoint_path, log_path):
global train_sample_tuples, val_sample_tuples, test_sample_tuples
train_actual_iters = count_actual_iterations(train_sample_tuples[0], config)
val_actual_iters = count_actual_iterations(val_sample_tuples[0], config)
test_actual_iters = count_actual_iterations(test_sample_tuples[0], config)
train_effective_iters = count_effective_iterations(train_sample_tuples[0], config)
val_effective_iters = count_effective_iterations(val_sample_tuples[0], config)
test_effective_iters = count_effective_iterations(test_sample_tuples[0], config)
optimizer = load_LRangerMod(model,
config=config)
print('Loading pre-trained weights for the model...')
checkpoint = T.load(checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
print('\nRESTORATION COMPLETE\n')
optimizer.zero_grad()
print("TESTING\n")
test_loss, test_F1 = run_batches(test_sample_tuples,
epoch=0,
model=model,
optimizer=optimizer,
config=config,
generator_len=test_actual_iters,
train=False,
desc='Test Batch')
def run_batches(sample_tuples, epoch,
model, optimizer, config,
generator_len,
train=True, scheduler=None,
desc=None):
global display_step
global pad_types
global tokenizer
global idx2labels
global flags
accu_step = config.total_batch_size//config.train_batch_size
if desc is None:
desc = 'Batch'
losses = []
F1s = []
total_tp = 0
total_pred_len = 0
total_gold_len = 0
f = open("output/out_{}.txt".format(flags.model), "w")
f.write('')
f.close()
with tqdm(total=generator_len, desc=desc, position=0) as pbar:
i = 0
for batch, batch_masks in batcher(sample_tuples,
pad_types,
config.train_batch_size,
sort_by_idx=1):
batch_texts = batch[0]
batch_w2v_idx = batch[1]
batch_ft_idx = batch[2]
batch_pos_idx = batch[3]
batch_ipa_idx = batch[4]
batch_phono = batch[5]
batch_labels = batch[6]
batch_segment_labels = batch[7]
batch_mask = batch_masks[1]
predictions, loss = predict_NER(model=model,
tokenizer=tokenizer,
batch_texts=batch_texts,
batch_w2v_idx=batch_w2v_idx,
batch_ft_idx=batch_ft_idx,
batch_pos_idx=batch_pos_idx,
batch_ipa_idx=batch_ipa_idx,
batch_phono=batch_phono,
batch_labels=batch_labels,
batch_segment_labels=batch_segment_labels,
batch_mask=batch_mask,
device=device,
config=config,
train=train)
losses.append(loss.item())
if train:
loss = loss/accu_step
loss.backward()
if (i+1) % accu_step == 0:
T.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
tp, pred_len, gold_len = eval_stats(predictions,
batch_labels,
batch_mask,
idx2labels)
prec, rec, F1 = compute_F1(tp, pred_len, gold_len)
F1s.append(F1)
if i % display_step == 0:
pbar.write("Model: {}, Epoch: {:3d}, Iter: {:5d}, ".format(config.model_name, epoch, i) +
"Loss: {:.3f}, F1: {:.3f}".format(loss, F1))
else:
f = open("output/out_{}.txt".format(flags.model), "a")
for prediction_sample, gold_sample, mask in zip(predictions, batch_labels, batch_mask):
true_seq_len = sum(mask)
prediction_sample = prediction_sample[0:true_seq_len]
gold_sample = gold_sample[0:true_seq_len]
for pred, gold in zip(prediction_sample, gold_sample):
f.write("test NNP "+str(idx2labels[gold])+" "+str(idx2labels[pred])+"\n")
f.close()
tp, pred_len, gold_len = eval_stats(predictions,
batch_labels,
batch_mask,
idx2labels)
prec, rec, F1 = compute_F1(tp, pred_len, gold_len)
total_tp += tp
total_pred_len += pred_len
total_gold_len += gold_len
if i % display_step == 0:
pbar.write("Model: {}, Epoch: {:3d}, Iter: {:5d}, ".format(config.model_name, epoch, i) +
"Loss: {:.3f}".format(loss))
i += 1
pbar.update(1)
print("\n\n")
if train:
F1 = np.mean(F1s)
else:
prec, rec, F1 = compute_F1(total_tp, total_pred_len, total_gold_len)
return np.mean(losses), F1
if __name__ == '__main__':
time = 0
while time < flags.times:
if time == 0:
time = 0
SEED = SEED_base_value+time
T.manual_seed(SEED)
random.seed(SEED)
T.backends.cudnn.deterministic = True
T.backends.cudnn.benchmark = False
np.random.seed(SEED)
run(time, display_params=True)
time += 1
| true
| true
|
f70561104782de2e235d4511bf0bf0b3283d5e1a
| 323
|
py
|
Python
|
exercises/play_ground/pg_022.py
|
EngineerToBe/python-labs
|
dbedcf1f8ebb4bdf756c732ad65c3b737df62cdf
|
[
"Apache-2.0"
] | null | null | null |
exercises/play_ground/pg_022.py
|
EngineerToBe/python-labs
|
dbedcf1f8ebb4bdf756c732ad65c3b737df62cdf
|
[
"Apache-2.0"
] | null | null | null |
exercises/play_ground/pg_022.py
|
EngineerToBe/python-labs
|
dbedcf1f8ebb4bdf756c732ad65c3b737df62cdf
|
[
"Apache-2.0"
] | null | null | null |
# Create a function named more_than_n that has three parameters named lst, item, and n.
# The function should return True if item appears in the list more than n times. The function should return False otherwise.
def more_than_n(lst, item, n):
if lst.count(item) > n:
return True
else:
return False
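# Quick check of the behaviour described above:
#   more_than_n([2, 4, 6, 2, 2], 2, 2)   # True  -- 2 occurs 3 times, and 3 > 2
#   more_than_n([2, 4, 6, 2, 2], 2, 3)   # False -- 3 is not greater than 3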
| 40.375
| 124
| 0.71517
|
def more_than_n(lst, item, n):
if lst.count(item) > n:
return True
else:
return False
| true
| true
|
f705622e5940a8d2588cb5974ea07bf217146809
| 20,749
|
py
|
Python
|
PhysicsTools/PatAlgos/python/slimming/applySubstructure_cff.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
PhysicsTools/PatAlgos/python/slimming/applySubstructure_cff.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 8
|
2020-03-20T23:18:36.000Z
|
2020-05-27T11:00:06.000Z
|
PhysicsTools/PatAlgos/python/slimming/applySubstructure_cff.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.tools.helpers import getPatAlgosToolsTask, addToProcessAndTask
def applySubstructure( process, postfix="" ) :
task = getPatAlgosToolsTask(process)
from PhysicsTools.PatAlgos.tools.jetTools import addJetCollection
from PhysicsTools.PatAlgos.producersLayer1.jetProducer_cfi import _patJets as patJetsDefault
# Configure the RECO jets
from RecoJets.JetProducers.ak4PFJets_cfi import ak4PFJetsPuppi
from RecoJets.JetProducers.ak8PFJets_cfi import ak8PFJetsPuppi, ak8PFJetsPuppiSoftDrop, ak8PFJetsPuppiConstituents, ak8PFJetsCHSConstituents
from RecoJets.JetProducers.ak8GenJets_cfi import ak8GenJets, ak8GenJetsSoftDrop, ak8GenJetsConstituents
addToProcessAndTask('ak4PFJetsPuppi'+postfix,ak4PFJetsPuppi.clone(), process, task)
addToProcessAndTask('ak8PFJetsPuppi'+postfix,ak8PFJetsPuppi.clone(), process, task)
addToProcessAndTask('ak8PFJetsPuppiConstituents', ak8PFJetsPuppiConstituents.clone(cut = cms.string('pt > 170.0 && abs(rapidity()) < 2.4') ), process, task )
addToProcessAndTask('ak8PFJetsCHSConstituents', ak8PFJetsCHSConstituents.clone(), process, task )
addToProcessAndTask('ak8PFJetsPuppiSoftDrop'+postfix, ak8PFJetsPuppiSoftDrop.clone( src = cms.InputTag('ak8PFJetsPuppiConstituents', 'constituents') ), process, task)
addToProcessAndTask('ak8GenJetsNoNuConstituents'+postfix, ak8GenJetsConstituents.clone(src='ak8GenJetsNoNu'), process, task )
addToProcessAndTask('ak8GenJetsNoNuSoftDrop'+postfix,ak8GenJetsSoftDrop.clone(src=cms.InputTag('ak8GenJetsNoNuConstituents'+postfix, 'constituents')),process,task)
addToProcessAndTask('slimmedGenJetsAK8SoftDropSubJets'+postfix,
cms.EDProducer("PATGenJetSlimmer",
src = cms.InputTag("ak8GenJetsNoNuSoftDrop"+postfix, "SubJets"),
packedGenParticles = cms.InputTag("packedGenParticles"),
cut = cms.string(""),
cutLoose = cms.string(""),
nLoose = cms.uint32(0),
clearDaughters = cms.bool(False), #False means rekeying
dropSpecific = cms.bool(True), # Save space
), process, task )
#add AK8 CHS
addJetCollection(process, postfix=postfix, labelName = 'AK8',
jetSource = cms.InputTag('ak8PFJetsCHS'+postfix),
algo= 'AK', rParam = 0.8,
btagDiscriminators = ['None'],
jetCorrections = ('AK8PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L3Absolute']), 'None'),
genJetCollection = cms.InputTag('slimmedGenJetsAK8')
)
getattr(process,"patJetsAK8"+postfix).userData.userFloats.src = [] # start with empty list of user floats
getattr(process,"selectedPatJetsAK8").cut = cms.string("pt > 170")
## add AK8 groomed masses with CHS
from RecoJets.Configuration.RecoPFJets_cff import ak8PFJetsCHSPruned, ak8PFJetsCHSSoftDrop
addToProcessAndTask('ak8PFJetsCHSPruned'+postfix, ak8PFJetsCHSPruned.clone(), process, task)
addToProcessAndTask('ak8PFJetsCHSSoftDrop'+postfix, ak8PFJetsCHSSoftDrop.clone(), process, task)
from RecoJets.JetProducers.ak8PFJetsCHS_groomingValueMaps_cfi import ak8PFJetsCHSPrunedMass, ak8PFJetsCHSTrimmedMass, ak8PFJetsCHSFilteredMass, ak8PFJetsCHSSoftDropMass
addToProcessAndTask('ak8PFJetsCHSPrunedMass'+postfix, ak8PFJetsCHSPrunedMass.clone(), process, task)
addToProcessAndTask('ak8PFJetsCHSTrimmedMass'+postfix, ak8PFJetsCHSTrimmedMass.clone(), process, task)
addToProcessAndTask('ak8PFJetsCHSFilteredMass'+postfix, ak8PFJetsCHSFilteredMass.clone(), process, task)
addToProcessAndTask('ak8PFJetsCHSSoftDropMass'+postfix, ak8PFJetsCHSSoftDropMass.clone(), process, task)
getattr(process,"patJetsAK8").userData.userFloats.src += ['ak8PFJetsCHSPrunedMass'+postfix,'ak8PFJetsCHSSoftDropMass'+postfix]
getattr(process,"patJetsAK8").addTagInfos = cms.bool(False)
# add Njettiness for CHS
process.load('RecoJets.JetProducers.nJettinessAdder_cfi')
task.add(process.Njettiness)
addToProcessAndTask('NjettinessAK8'+postfix, process.Njettiness.clone(), process, task)
getattr(process,"NjettinessAK8").src = cms.InputTag("ak8PFJetsCHS"+postfix)
getattr(process,"NjettinessAK8").cone = cms.double(0.8)
getattr(process,"patJetsAK8").userData.userFloats.src += ['NjettinessAK8'+postfix+':tau1','NjettinessAK8'+postfix+':tau2','NjettinessAK8'+postfix+':tau3','NjettinessAK8'+postfix+':tau4']
# add Njettiness for the soft drop subjets
addToProcessAndTask('NjettinessAK8Subjets'+postfix, process.Njettiness.clone(), process, task)
getattr(process,"NjettinessAK8Subjets"+postfix).src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix, "SubJets")
getattr(process,"NjettinessAK8Subjets").cone = cms.double(0.8)
## PATify CHS soft drop fat jets
addJetCollection(
process,
postfix=postfix,
labelName = 'AK8PFCHSSoftDrop',
jetSource = cms.InputTag('ak8PFJetsCHSSoftDrop'+postfix),
btagDiscriminators = ['None'],
jetCorrections = ('AK8PFchs', ['L1FastJet', 'L2Relative', 'L3Absolute'], 'None'),
getJetMCFlavour = False # jet flavor disabled
)
#add RECO AK8 from PUPPI and RECO AK8 PUPPI with soft drop... will be needed by ungroomed AK8 jets later
## PATify puppi soft drop fat jets
addJetCollection(
process,
postfix=postfix,
labelName = 'AK8PFPuppiSoftDrop' + postfix,
jetSource = cms.InputTag('ak8PFJetsPuppiSoftDrop'+postfix),
btagDiscriminators = ['None'],
genJetCollection = cms.InputTag('slimmedGenJetsAK8'),
jetCorrections = ('AK8PFPuppi', ['L2Relative', 'L3Absolute'], 'None'),
getJetMCFlavour = False # jet flavor disabled
)
## PATify soft drop subjets
addJetCollection(
process,
postfix=postfix,
labelName = 'AK8PFPuppiSoftDropSubjets',
jetSource = cms.InputTag('ak8PFJetsPuppiSoftDrop'+postfix,'SubJets'),
algo = 'ak', # needed for subjet flavor clustering
rParam = 0.8, # needed for subjet flavor clustering
btagDiscriminators = ['pfDeepCSVJetTags:probb', 'pfDeepCSVJetTags:probbb', 'pfCombinedInclusiveSecondaryVertexV2BJetTags','pfCombinedMVAV2BJetTags'],
jetCorrections = ('AK4PFPuppi', ['L2Relative', 'L3Absolute'], 'None'),
explicitJTA = True, # needed for subjet b tagging
svClustering = True, # needed for subjet b tagging
genJetCollection = cms.InputTag('slimmedGenJetsAK8SoftDropSubJets'),
fatJets=cms.InputTag('ak8PFJetsPuppi'), # needed for subjet flavor clustering
groomedFatJets=cms.InputTag('ak8PFJetsPuppiSoftDrop') # needed for subjet flavor clustering
)
# add groomed ECFs and N-subjettiness to soft dropped pat::Jets for fat jets and subjets
process.load('RecoJets.JetProducers.ECF_cff')
addToProcessAndTask('nb1AK8PuppiSoftDrop'+postfix, process.ecfNbeta1.clone(src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix), cuts = cms.vstring('', '', 'pt > 250')), process, task)
addToProcessAndTask('nb2AK8PuppiSoftDrop'+postfix, process.ecfNbeta2.clone(src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix), cuts = cms.vstring('', '', 'pt > 250')), process, task)
#too slow now ==> disable
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_phase2_common_cff import phase2_common
for e in [pp_on_XeXe_2017, pp_on_AA_2018, phase2_common]:
e.toModify(getattr(process,'nb1AK8PuppiSoftDrop'+postfix), cuts = ['pt > 999999', 'pt > 999999', 'pt > 999999'] )
e.toModify(getattr(process,'nb2AK8PuppiSoftDrop'+postfix), cuts = ['pt > 999999', 'pt > 999999', 'pt > 999999'] )
getattr(process,"patJetsAK8PFPuppiSoftDrop").userData.userFloats.src += ['nb1AK8PuppiSoftDrop'+postfix+':ecfN2','nb1AK8PuppiSoftDrop'+postfix+':ecfN3']
getattr(process,"patJetsAK8PFPuppiSoftDrop").userData.userFloats.src += ['nb2AK8PuppiSoftDrop'+postfix+':ecfN2','nb2AK8PuppiSoftDrop'+postfix+':ecfN3']
addToProcessAndTask('nb1AK8PuppiSoftDropSubjets'+postfix, process.ecfNbeta1.clone(src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix, "SubJets")), process, task)
addToProcessAndTask('nb2AK8PuppiSoftDropSubjets'+postfix, process.ecfNbeta2.clone(src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix, "SubJets")), process, task)
getattr(process,"patJetsAK8PFPuppiSoftDropSubjets"+postfix).userData.userFloats.src += ['nb1AK8PuppiSoftDropSubjets'+postfix+':ecfN2','nb1AK8PuppiSoftDropSubjets'+postfix+':ecfN3']
getattr(process,"patJetsAK8PFPuppiSoftDropSubjets"+postfix).userData.userFloats.src += ['nb2AK8PuppiSoftDropSubjets'+postfix+':ecfN2','nb2AK8PuppiSoftDropSubjets'+postfix+':ecfN3']
getattr(process,"patJetsAK8PFPuppiSoftDropSubjets"+postfix).userData.userFloats.src += ['NjettinessAK8Subjets'+postfix+':tau1','NjettinessAK8Subjets'+postfix+':tau2','NjettinessAK8Subjets'+postfix+':tau3','NjettinessAK8Subjets'+postfix+':tau4']
for e in [pp_on_XeXe_2017, pp_on_AA_2018, phase2_common]:
e.toModify(getattr(process,'nb1AK8PuppiSoftDropSubjets'+postfix), cuts = ['pt > 999999', 'pt > 999999', 'pt > 999999'] )
e.toModify(getattr(process,'nb2AK8PuppiSoftDropSubjets'+postfix), cuts = ['pt > 999999', 'pt > 999999', 'pt > 999999'] )
# rekey the groomed ECF value maps to the ungroomed reco jets, which will then be picked
# up by PAT in the user floats.
addToProcessAndTask("ak8PFJetsPuppiSoftDropValueMap"+postfix,
cms.EDProducer("RecoJetToPatJetDeltaRValueMapProducer",
src = cms.InputTag("ak8PFJetsPuppi"+postfix),
matched = cms.InputTag("patJetsAK8PFPuppiSoftDrop"+postfix),
distMax = cms.double(0.8),
values = cms.vstring([
'userFloat("nb1AK8PuppiSoftDrop'+postfix+':ecfN2")',
'userFloat("nb1AK8PuppiSoftDrop'+postfix+':ecfN3")',
'userFloat("nb2AK8PuppiSoftDrop'+postfix+':ecfN2")',
'userFloat("nb2AK8PuppiSoftDrop'+postfix+':ecfN3")',
]),
valueLabels = cms.vstring( [
'nb1AK8PuppiSoftDropN2',
'nb1AK8PuppiSoftDropN3',
'nb2AK8PuppiSoftDropN2',
'nb2AK8PuppiSoftDropN3',
]) ),
process, task)
# Patify AK8 PF PUPPI
addJetCollection(process, postfix=postfix, labelName = 'AK8Puppi',
jetSource = cms.InputTag('ak8PFJetsPuppi'+postfix),
algo= 'AK', rParam = 0.8,
jetCorrections = ('AK8PFPuppi', cms.vstring(['L2Relative', 'L3Absolute']), 'None'),
btagDiscriminators = ([
'pfCombinedSecondaryVertexV2BJetTags',
'pfCombinedInclusiveSecondaryVertexV2BJetTags',
'pfCombinedMVAV2BJetTags',
'pfDeepCSVJetTags:probb',
'pfDeepCSVJetTags:probc',
'pfDeepCSVJetTags:probudsg',
'pfDeepCSVJetTags:probbb',
'pfBoostedDoubleSecondaryVertexAK8BJetTags']),
genJetCollection = cms.InputTag('slimmedGenJetsAK8')
)
getattr(process,"patJetsAK8Puppi"+postfix).userData.userFloats.src = [] # start with empty list of user floats
getattr(process,"selectedPatJetsAK8Puppi"+postfix).cut = cms.string("pt > 100")
getattr(process,"selectedPatJetsAK8Puppi"+postfix).cutLoose = cms.string("pt > 30")
getattr(process,"selectedPatJetsAK8Puppi"+postfix).nLoose = cms.uint32(3)
from RecoJets.JetAssociationProducers.j2tParametersVX_cfi import j2tParametersVX
addToProcessAndTask('ak8PFJetsPuppiTracksAssociatorAtVertex'+postfix, cms.EDProducer("JetTracksAssociatorAtVertex",
j2tParametersVX.clone( coneSize = cms.double(0.8) ),
jets = cms.InputTag("ak8PFJetsPuppi") ),
process, task)
addToProcessAndTask('patJetAK8PuppiCharge'+postfix, cms.EDProducer("JetChargeProducer",
src = cms.InputTag("ak8PFJetsPuppiTracksAssociatorAtVertex"),
var = cms.string('Pt'),
exp = cms.double(1.0) ),
process, task)
## now add AK8 groomed masses and ECF
from RecoJets.JetProducers.ak8PFJetsPuppi_groomingValueMaps_cfi import ak8PFJetsPuppiSoftDropMass
addToProcessAndTask('ak8PFJetsPuppiSoftDropMass'+postfix, ak8PFJetsPuppiSoftDropMass.clone(), process, task)
getattr(process,"patJetsAK8Puppi"+postfix).userData.userFloats.src += ['ak8PFJetsPuppiSoftDropMass'+postfix]
getattr(process,"patJetsAK8Puppi"+postfix).addTagInfos = cms.bool(False)
getattr(process,"patJetsAK8Puppi"+postfix).userData.userFloats.src += [
cms.InputTag('ak8PFJetsPuppiSoftDropValueMap'+postfix,'nb1AK8PuppiSoftDropN2'),
cms.InputTag('ak8PFJetsPuppiSoftDropValueMap'+postfix,'nb1AK8PuppiSoftDropN3'),
cms.InputTag('ak8PFJetsPuppiSoftDropValueMap'+postfix,'nb2AK8PuppiSoftDropN2'),
cms.InputTag('ak8PFJetsPuppiSoftDropValueMap'+postfix,'nb2AK8PuppiSoftDropN3'),
]
# add PUPPI Njetiness
addToProcessAndTask('NjettinessAK8Puppi'+postfix, process.Njettiness.clone(), process, task)
getattr(process,"NjettinessAK8Puppi"+postfix).src = cms.InputTag("ak8PFJetsPuppi"+postfix)
getattr(process,"NjettinessAK8Puppi").cone = cms.double(0.8)
getattr(process,"patJetsAK8Puppi").userData.userFloats.src += ['NjettinessAK8Puppi'+postfix+':tau1','NjettinessAK8Puppi'+postfix+':tau2','NjettinessAK8Puppi'+postfix+':tau3','NjettinessAK8Puppi'+postfix+':tau4']
# Now combine the CHS and PUPPI information into the PUPPI jets via delta R value maps
addToProcessAndTask("ak8PFJetsCHSValueMap"+postfix, cms.EDProducer("RecoJetToPatJetDeltaRValueMapProducer",
src = cms.InputTag("ak8PFJetsPuppi"+postfix),
matched = cms.InputTag("patJetsAK8"+postfix),
distMax = cms.double(0.8),
values = cms.vstring([
'userFloat("ak8PFJetsCHSPrunedMass"'+postfix+')',
'userFloat("ak8PFJetsCHSSoftDropMass"'+postfix+')',
'userFloat("NjettinessAK8'+postfix+':tau1")',
'userFloat("NjettinessAK8'+postfix+':tau2")',
'userFloat("NjettinessAK8'+postfix+':tau3")',
'userFloat("NjettinessAK8'+postfix+':tau4")',
'pt','eta','phi','mass', 'jetArea', 'jecFactor(0)'
]),
valueLabels = cms.vstring( [
'ak8PFJetsCHSPrunedMass',
'ak8PFJetsCHSSoftDropMass',
'NjettinessAK8CHSTau1',
'NjettinessAK8CHSTau2',
'NjettinessAK8CHSTau3',
'NjettinessAK8CHSTau4',
'pt','eta','phi','mass', 'jetArea', 'rawFactor'
]) ),
process, task)
# Now set up the user floats
getattr(process,"patJetsAK8Puppi"+postfix).userData.userFloats.src += [
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'ak8PFJetsCHSPrunedMass'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'ak8PFJetsCHSSoftDropMass'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'NjettinessAK8CHSTau1'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'NjettinessAK8CHSTau2'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'NjettinessAK8CHSTau3'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'NjettinessAK8CHSTau4'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'pt'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'eta'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'phi'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'mass'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'jetArea'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'rawFactor'),
]
addToProcessAndTask("slimmedJetsAK8PFPuppiSoftDropSubjets"+postfix,
cms.EDProducer("PATJetSlimmer",
src = cms.InputTag("selectedPatJetsAK8PFPuppiSoftDropSubjets"),
packedPFCandidates = cms.InputTag("packedPFCandidates"),
dropJetVars = cms.string("1"),
dropDaughters = cms.string("0"),
rekeyDaughters = cms.string("1"),
dropTrackRefs = cms.string("1"),
dropSpecific = cms.string("1"),
dropTagInfos = cms.string("1"),
modifyJets = cms.bool(True),
mixedDaughters = cms.bool(False),
modifierConfig = cms.PSet( modifications = cms.VPSet() )
),
process, task)
## Establish references between PATified fat jets and subjets using the BoostedJetMerger
addToProcessAndTask("slimmedJetsAK8PFPuppiSoftDropPacked"+postfix,
cms.EDProducer("BoostedJetMerger",
jetSrc=cms.InputTag("selectedPatJetsAK8PFPuppiSoftDrop"),
subjetSrc=cms.InputTag("slimmedJetsAK8PFPuppiSoftDropSubjets")
),
process, task )
addToProcessAndTask("packedPatJetsAK8"+postfix, cms.EDProducer("JetSubstructurePacker",
jetSrc = cms.InputTag("selectedPatJetsAK8Puppi"+postfix),
distMax = cms.double(0.8),
algoTags = cms.VInputTag(
cms.InputTag("slimmedJetsAK8PFPuppiSoftDropPacked"+postfix)
),
algoLabels = cms.vstring(
'SoftDropPuppi'
),
fixDaughters = cms.bool(True),
packedPFCandidates = cms.InputTag("packedPFCandidates"+postfix),
),
process, task)
# switch off daughter re-keying since it's done in the JetSubstructurePacker (and can't be done afterwards)
process.slimmedJetsAK8.rekeyDaughters = "0"
# Reconfigure the slimmedAK8 jet information to keep
process.slimmedJetsAK8.dropDaughters = cms.string("pt < 170")
process.slimmedJetsAK8.dropSpecific = cms.string("pt < 170")
process.slimmedJetsAK8.dropTagInfos = cms.string("pt < 170")
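# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# applySubstructure() above is meant to be called on a cms.Process while assembling a
# PAT/miniAOD configuration; the import path and process name below are illustrative
# assumptions only and should be checked against the actual package layout.
#
#   import FWCore.ParameterSet.Config as cms
#   from PhysicsTools.PatAlgos.slimming.applySubstructure_cff import applySubstructure  # assumed path
#   process = cms.Process("PAT")
#   applySubstructure(process, postfix="")  # adds the AK8 Puppi/CHS substructure collections to the PAT task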
| 66.717042
| 248
| 0.603644
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.tools.helpers import getPatAlgosToolsTask, addToProcessAndTask
def applySubstructure( process, postfix="" ) :
task = getPatAlgosToolsTask(process)
from PhysicsTools.PatAlgos.tools.jetTools import addJetCollection
from PhysicsTools.PatAlgos.producersLayer1.jetProducer_cfi import _patJets as patJetsDefault
from RecoJets.JetProducers.ak4PFJets_cfi import ak4PFJetsPuppi
from RecoJets.JetProducers.ak8PFJets_cfi import ak8PFJetsPuppi, ak8PFJetsPuppiSoftDrop, ak8PFJetsPuppiConstituents, ak8PFJetsCHSConstituents
from RecoJets.JetProducers.ak8GenJets_cfi import ak8GenJets, ak8GenJetsSoftDrop, ak8GenJetsConstituents
addToProcessAndTask('ak4PFJetsPuppi'+postfix,ak4PFJetsPuppi.clone(), process, task)
addToProcessAndTask('ak8PFJetsPuppi'+postfix,ak8PFJetsPuppi.clone(), process, task)
addToProcessAndTask('ak8PFJetsPuppiConstituents', ak8PFJetsPuppiConstituents.clone(cut = cms.string('pt > 170.0 && abs(rapidity()) < 2.4') ), process, task )
addToProcessAndTask('ak8PFJetsCHSConstituents', ak8PFJetsCHSConstituents.clone(), process, task )
addToProcessAndTask('ak8PFJetsPuppiSoftDrop'+postfix, ak8PFJetsPuppiSoftDrop.clone( src = cms.InputTag('ak8PFJetsPuppiConstituents', 'constituents') ), process, task)
addToProcessAndTask('ak8GenJetsNoNuConstituents'+postfix, ak8GenJetsConstituents.clone(src='ak8GenJetsNoNu'), process, task )
addToProcessAndTask('ak8GenJetsNoNuSoftDrop'+postfix,ak8GenJetsSoftDrop.clone(src=cms.InputTag('ak8GenJetsNoNuConstituents'+postfix, 'constituents')),process,task)
addToProcessAndTask('slimmedGenJetsAK8SoftDropSubJets'+postfix,
cms.EDProducer("PATGenJetSlimmer",
src = cms.InputTag("ak8GenJetsNoNuSoftDrop"+postfix, "SubJets"),
packedGenParticles = cms.InputTag("packedGenParticles"),
cut = cms.string(""),
cutLoose = cms.string(""),
nLoose = cms.uint32(0),
clearDaughters = cms.bool(False),
dropSpecific = cms.bool(True),
), process, task )
addJetCollection(process, postfix=postfix, labelName = 'AK8',
jetSource = cms.InputTag('ak8PFJetsCHS'+postfix),
algo= 'AK', rParam = 0.8,
btagDiscriminators = ['None'],
jetCorrections = ('AK8PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L3Absolute']), 'None'),
genJetCollection = cms.InputTag('slimmedGenJetsAK8')
)
getattr(process,"patJetsAK8"+postfix).userData.userFloats.src = []
getattr(process,"selectedPatJetsAK8").cut = cms.string("pt > 170")
    from RecoJets.Configuration.RecoPFJets_cff import ak8PFJetsCHSPruned, ak8PFJetsCHSSoftDrop
addToProcessAndTask('ak8PFJetsCHSPruned'+postfix, ak8PFJetsCHSPruned.clone(), process, task)
addToProcessAndTask('ak8PFJetsCHSSoftDrop'+postfix, ak8PFJetsCHSSoftDrop.clone(), process, task)
from RecoJets.JetProducers.ak8PFJetsCHS_groomingValueMaps_cfi import ak8PFJetsCHSPrunedMass, ak8PFJetsCHSTrimmedMass, ak8PFJetsCHSFilteredMass, ak8PFJetsCHSSoftDropMass
addToProcessAndTask('ak8PFJetsCHSPrunedMass'+postfix, ak8PFJetsCHSPrunedMass.clone(), process, task)
addToProcessAndTask('ak8PFJetsCHSTrimmedMass'+postfix, ak8PFJetsCHSTrimmedMass.clone(), process, task)
addToProcessAndTask('ak8PFJetsCHSFilteredMass'+postfix, ak8PFJetsCHSFilteredMass.clone(), process, task)
addToProcessAndTask('ak8PFJetsCHSSoftDropMass'+postfix, ak8PFJetsCHSSoftDropMass.clone(), process, task)
getattr(process,"patJetsAK8").userData.userFloats.src += ['ak8PFJetsCHSPrunedMass'+postfix,'ak8PFJetsCHSSoftDropMass'+postfix]
getattr(process,"patJetsAK8").addTagInfos = cms.bool(False)
process.load('RecoJets.JetProducers.nJettinessAdder_cfi')
task.add(process.Njettiness)
addToProcessAndTask('NjettinessAK8'+postfix, process.Njettiness.clone(), process, task)
getattr(process,"NjettinessAK8").src = cms.InputTag("ak8PFJetsCHS"+postfix)
getattr(process,"NjettinessAK8").cone = cms.double(0.8)
getattr(process,"patJetsAK8").userData.userFloats.src += ['NjettinessAK8'+postfix+':tau1','NjettinessAK8'+postfix+':tau2','NjettinessAK8'+postfix+':tau3','NjettinessAK8'+postfix+':tau4']
addToProcessAndTask('NjettinessAK8Subjets'+postfix, process.Njettiness.clone(), process, task)
getattr(process,"NjettinessAK8Subjets"+postfix).src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix, "SubJets")
getattr(process,"NjettinessAK8Subjets").cone = cms.double(0.8)
    addJetCollection(process,
postfix=postfix,
labelName = 'AK8PFCHSSoftDrop',
jetSource = cms.InputTag('ak8PFJetsCHSSoftDrop'+postfix),
btagDiscriminators = ['None'],
jetCorrections = ('AK8PFchs', ['L1FastJet', 'L2Relative', 'L3Absolute'], 'None'),
getJetMCFlavour = False
)
    addJetCollection(process,
postfix=postfix,
labelName = 'AK8PFPuppiSoftDrop' + postfix,
jetSource = cms.InputTag('ak8PFJetsPuppiSoftDrop'+postfix),
btagDiscriminators = ['None'],
genJetCollection = cms.InputTag('slimmedGenJetsAK8'),
jetCorrections = ('AK8PFPuppi', ['L2Relative', 'L3Absolute'], 'None'),
getJetMCFlavour = False
)
    addJetCollection(process,
postfix=postfix,
labelName = 'AK8PFPuppiSoftDropSubjets',
jetSource = cms.InputTag('ak8PFJetsPuppiSoftDrop'+postfix,'SubJets'),
algo = 'ak',
rParam = 0.8,
btagDiscriminators = ['pfDeepCSVJetTags:probb', 'pfDeepCSVJetTags:probbb', 'pfCombinedInclusiveSecondaryVertexV2BJetTags','pfCombinedMVAV2BJetTags'],
jetCorrections = ('AK4PFPuppi', ['L2Relative', 'L3Absolute'], 'None'),
explicitJTA = True,
svClustering = True,
genJetCollection = cms.InputTag('slimmedGenJetsAK8SoftDropSubJets'),
fatJets=cms.InputTag('ak8PFJetsPuppi'),
groomedFatJets=cms.InputTag('ak8PFJetsPuppiSoftDrop')
)
process.load('RecoJets.JetProducers.ECF_cff')
addToProcessAndTask('nb1AK8PuppiSoftDrop'+postfix, process.ecfNbeta1.clone(src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix), cuts = cms.vstring('', '', 'pt > 250')), process, task)
addToProcessAndTask('nb2AK8PuppiSoftDrop'+postfix, process.ecfNbeta2.clone(src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix), cuts = cms.vstring('', '', 'pt > 250')), process, task)
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_phase2_common_cff import phase2_common
for e in [pp_on_XeXe_2017, pp_on_AA_2018, phase2_common]:
e.toModify(getattr(process,'nb1AK8PuppiSoftDrop'+postfix), cuts = ['pt > 999999', 'pt > 999999', 'pt > 999999'] )
e.toModify(getattr(process,'nb2AK8PuppiSoftDrop'+postfix), cuts = ['pt > 999999', 'pt > 999999', 'pt > 999999'] )
getattr(process,"patJetsAK8PFPuppiSoftDrop").userData.userFloats.src += ['nb1AK8PuppiSoftDrop'+postfix+':ecfN2','nb1AK8PuppiSoftDrop'+postfix+':ecfN3']
getattr(process,"patJetsAK8PFPuppiSoftDrop").userData.userFloats.src += ['nb2AK8PuppiSoftDrop'+postfix+':ecfN2','nb2AK8PuppiSoftDrop'+postfix+':ecfN3']
addToProcessAndTask('nb1AK8PuppiSoftDropSubjets'+postfix, process.ecfNbeta1.clone(src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix, "SubJets")), process, task)
addToProcessAndTask('nb2AK8PuppiSoftDropSubjets'+postfix, process.ecfNbeta2.clone(src = cms.InputTag("ak8PFJetsPuppiSoftDrop"+postfix, "SubJets")), process, task)
getattr(process,"patJetsAK8PFPuppiSoftDropSubjets"+postfix).userData.userFloats.src += ['nb1AK8PuppiSoftDropSubjets'+postfix+':ecfN2','nb1AK8PuppiSoftDropSubjets'+postfix+':ecfN3']
getattr(process,"patJetsAK8PFPuppiSoftDropSubjets"+postfix).userData.userFloats.src += ['nb2AK8PuppiSoftDropSubjets'+postfix+':ecfN2','nb2AK8PuppiSoftDropSubjets'+postfix+':ecfN3']
getattr(process,"patJetsAK8PFPuppiSoftDropSubjets"+postfix).userData.userFloats.src += ['NjettinessAK8Subjets'+postfix+':tau1','NjettinessAK8Subjets'+postfix+':tau2','NjettinessAK8Subjets'+postfix+':tau3','NjettinessAK8Subjets'+postfix+':tau4']
for e in [pp_on_XeXe_2017, pp_on_AA_2018, phase2_common]:
e.toModify(getattr(process,'nb1AK8PuppiSoftDropSubjets'+postfix), cuts = ['pt > 999999', 'pt > 999999', 'pt > 999999'] )
e.toModify(getattr(process,'nb2AK8PuppiSoftDropSubjets'+postfix), cuts = ['pt > 999999', 'pt > 999999', 'pt > 999999'] )
addToProcessAndTask("ak8PFJetsPuppiSoftDropValueMap"+postfix,
cms.EDProducer("RecoJetToPatJetDeltaRValueMapProducer",
src = cms.InputTag("ak8PFJetsPuppi"+postfix),
matched = cms.InputTag("patJetsAK8PFPuppiSoftDrop"+postfix),
distMax = cms.double(0.8),
values = cms.vstring([
'userFloat("nb1AK8PuppiSoftDrop'+postfix+':ecfN2")',
'userFloat("nb1AK8PuppiSoftDrop'+postfix+':ecfN3")',
'userFloat("nb2AK8PuppiSoftDrop'+postfix+':ecfN2")',
'userFloat("nb2AK8PuppiSoftDrop'+postfix+':ecfN3")',
]),
valueLabels = cms.vstring( [
'nb1AK8PuppiSoftDropN2',
'nb1AK8PuppiSoftDropN3',
'nb2AK8PuppiSoftDropN2',
'nb2AK8PuppiSoftDropN3',
]) ),
process, task)
addJetCollection(process, postfix=postfix, labelName = 'AK8Puppi',
jetSource = cms.InputTag('ak8PFJetsPuppi'+postfix),
algo= 'AK', rParam = 0.8,
jetCorrections = ('AK8PFPuppi', cms.vstring(['L2Relative', 'L3Absolute']), 'None'),
btagDiscriminators = ([
'pfCombinedSecondaryVertexV2BJetTags',
'pfCombinedInclusiveSecondaryVertexV2BJetTags',
'pfCombinedMVAV2BJetTags',
'pfDeepCSVJetTags:probb',
'pfDeepCSVJetTags:probc',
'pfDeepCSVJetTags:probudsg',
'pfDeepCSVJetTags:probbb',
'pfBoostedDoubleSecondaryVertexAK8BJetTags']),
genJetCollection = cms.InputTag('slimmedGenJetsAK8')
)
getattr(process,"patJetsAK8Puppi"+postfix).userData.userFloats.src = []
getattr(process,"selectedPatJetsAK8Puppi"+postfix).cut = cms.string("pt > 100")
getattr(process,"selectedPatJetsAK8Puppi"+postfix).cutLoose = cms.string("pt > 30")
getattr(process,"selectedPatJetsAK8Puppi"+postfix).nLoose = cms.uint32(3)
from RecoJets.JetAssociationProducers.j2tParametersVX_cfi import j2tParametersVX
addToProcessAndTask('ak8PFJetsPuppiTracksAssociatorAtVertex'+postfix, cms.EDProducer("JetTracksAssociatorAtVertex",
j2tParametersVX.clone( coneSize = cms.double(0.8) ),
jets = cms.InputTag("ak8PFJetsPuppi") ),
process, task)
addToProcessAndTask('patJetAK8PuppiCharge'+postfix, cms.EDProducer("JetChargeProducer",
src = cms.InputTag("ak8PFJetsPuppiTracksAssociatorAtVertex"),
var = cms.string('Pt'),
exp = cms.double(1.0) ),
process, task)
    from RecoJets.JetProducers.ak8PFJetsPuppi_groomingValueMaps_cfi import ak8PFJetsPuppiSoftDropMass
addToProcessAndTask('ak8PFJetsPuppiSoftDropMass'+postfix, ak8PFJetsPuppiSoftDropMass.clone(), process, task)
getattr(process,"patJetsAK8Puppi"+postfix).userData.userFloats.src += ['ak8PFJetsPuppiSoftDropMass'+postfix]
getattr(process,"patJetsAK8Puppi"+postfix).addTagInfos = cms.bool(False)
getattr(process,"patJetsAK8Puppi"+postfix).userData.userFloats.src += [
cms.InputTag('ak8PFJetsPuppiSoftDropValueMap'+postfix,'nb1AK8PuppiSoftDropN2'),
cms.InputTag('ak8PFJetsPuppiSoftDropValueMap'+postfix,'nb1AK8PuppiSoftDropN3'),
cms.InputTag('ak8PFJetsPuppiSoftDropValueMap'+postfix,'nb2AK8PuppiSoftDropN2'),
cms.InputTag('ak8PFJetsPuppiSoftDropValueMap'+postfix,'nb2AK8PuppiSoftDropN3'),
]
addToProcessAndTask('NjettinessAK8Puppi'+postfix, process.Njettiness.clone(), process, task)
getattr(process,"NjettinessAK8Puppi"+postfix).src = cms.InputTag("ak8PFJetsPuppi"+postfix)
getattr(process,"NjettinessAK8Puppi").cone = cms.double(0.8)
getattr(process,"patJetsAK8Puppi").userData.userFloats.src += ['NjettinessAK8Puppi'+postfix+':tau1','NjettinessAK8Puppi'+postfix+':tau2','NjettinessAK8Puppi'+postfix+':tau3','NjettinessAK8Puppi'+postfix+':tau4']
addToProcessAndTask("ak8PFJetsCHSValueMap"+postfix, cms.EDProducer("RecoJetToPatJetDeltaRValueMapProducer",
src = cms.InputTag("ak8PFJetsPuppi"+postfix),
matched = cms.InputTag("patJetsAK8"+postfix),
distMax = cms.double(0.8),
values = cms.vstring([
'userFloat("ak8PFJetsCHSPrunedMass"'+postfix+')',
'userFloat("ak8PFJetsCHSSoftDropMass"'+postfix+')',
'userFloat("NjettinessAK8'+postfix+':tau1")',
'userFloat("NjettinessAK8'+postfix+':tau2")',
'userFloat("NjettinessAK8'+postfix+':tau3")',
'userFloat("NjettinessAK8'+postfix+':tau4")',
'pt','eta','phi','mass', 'jetArea', 'jecFactor(0)'
]),
valueLabels = cms.vstring( [
'ak8PFJetsCHSPrunedMass',
'ak8PFJetsCHSSoftDropMass',
'NjettinessAK8CHSTau1',
'NjettinessAK8CHSTau2',
'NjettinessAK8CHSTau3',
'NjettinessAK8CHSTau4',
'pt','eta','phi','mass', 'jetArea', 'rawFactor'
]) ),
process, task)
getattr(process,"patJetsAK8Puppi"+postfix).userData.userFloats.src += [
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'ak8PFJetsCHSPrunedMass'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'ak8PFJetsCHSSoftDropMass'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'NjettinessAK8CHSTau1'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'NjettinessAK8CHSTau2'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'NjettinessAK8CHSTau3'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'NjettinessAK8CHSTau4'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'pt'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'eta'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'phi'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'mass'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'jetArea'),
cms.InputTag('ak8PFJetsCHSValueMap'+postfix,'rawFactor'),
]
addToProcessAndTask("slimmedJetsAK8PFPuppiSoftDropSubjets"+postfix,
cms.EDProducer("PATJetSlimmer",
src = cms.InputTag("selectedPatJetsAK8PFPuppiSoftDropSubjets"),
packedPFCandidates = cms.InputTag("packedPFCandidates"),
dropJetVars = cms.string("1"),
dropDaughters = cms.string("0"),
rekeyDaughters = cms.string("1"),
dropTrackRefs = cms.string("1"),
dropSpecific = cms.string("1"),
dropTagInfos = cms.string("1"),
modifyJets = cms.bool(True),
mixedDaughters = cms.bool(False),
modifierConfig = cms.PSet( modifications = cms.VPSet() )
),
process, task)
    addToProcessAndTask("slimmedJetsAK8PFPuppiSoftDropPacked"+postfix,
                        cms.EDProducer("BoostedJetMerger",
jetSrc=cms.InputTag("selectedPatJetsAK8PFPuppiSoftDrop"),
subjetSrc=cms.InputTag("slimmedJetsAK8PFPuppiSoftDropSubjets")
),
process, task )
addToProcessAndTask("packedPatJetsAK8"+postfix, cms.EDProducer("JetSubstructurePacker",
jetSrc = cms.InputTag("selectedPatJetsAK8Puppi"+postfix),
distMax = cms.double(0.8),
algoTags = cms.VInputTag(
cms.InputTag("slimmedJetsAK8PFPuppiSoftDropPacked"+postfix)
),
algoLabels = cms.vstring(
'SoftDropPuppi'
),
fixDaughters = cms.bool(True),
packedPFCandidates = cms.InputTag("packedPFCandidates"+postfix),
),
process, task)
process.slimmedJetsAK8.rekeyDaughters = "0"
process.slimmedJetsAK8.dropDaughters = cms.string("pt < 170")
process.slimmedJetsAK8.dropSpecific = cms.string("pt < 170")
process.slimmedJetsAK8.dropTagInfos = cms.string("pt < 170")
| true
| true
|
f7056453a1c75203ba1b816e70ba850dc52f30e4
| 3,232
|
py
|
Python
|
scripts/oldScripts2019/3_analyzeDataKnee_Participant1.py
|
oliviermirat/Scientizen
|
e06515acbdc2cc2dc22445489dec2df4af454920
|
[
"MIT"
] | 3
|
2017-06-10T10:41:55.000Z
|
2017-06-26T10:24:41.000Z
|
scripts/oldScripts2019/3_analyzeDataKnee_Participant1.py
|
oliviermirat/Scientizen
|
e06515acbdc2cc2dc22445489dec2df4af454920
|
[
"MIT"
] | 56
|
2020-05-19T16:06:59.000Z
|
2020-11-11T13:49:13.000Z
|
scripts/oldScripts2019/3_analyzeDataKnee_Participant1.py
|
oliviermirat/Scientizen
|
e06515acbdc2cc2dc22445489dec2df4af454920
|
[
"MIT"
] | 12
|
2020-05-19T18:27:26.000Z
|
2021-02-26T15:39:33.000Z
|
# This scripts assumes that the dataframe has been created and saved in data.txt
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dataFrameUtilities import addInsultIntensityColumns, getInsultAboveThreshold, getPainAboveThreshold, selectColumns,selectTime
from sklearn.preprocessing import MinMaxScaler
# Getting data
input = open("../data/preprocessed/preprocessedDataParticipant1.txt", "rb")
data = pickle.load(input)
input.close()
timeSelected = selectTime(data, "2016-09-01", "2019-10-20")
# Removing "steps" caused by scooter riding
timeSelected["steps"] = timeSelected["steps"] - 37 * timeSelected["scooterRiding"]
timeSelected["steps"][timeSelected["steps"] < 0] = 0
# Getting knee pain information
kneePain = selectColumns(timeSelected, ["kneePain"])
thres = kneePain.copy()
thres[:] = 3.3
# Calculating knee stress over time
env = addInsultIntensityColumns(timeSelected, ["steps", "kneePain"], 21, 30)
envRollingMean = selectColumns(env, ["stepsInsultIntensity"])
envMaxInsultDiff = selectColumns(env, ["stepsMaxInsultDiff"])
kneePainRollingMean = selectColumns(env, ["kneePainInsultIntensity"])
kneePainRollingMean = kneePainRollingMean.replace(0, 0.4)
scaler = MinMaxScaler()
kneePainRollingMeanArray = scaler.fit_transform(kneePainRollingMean)
for i in range(0, len(kneePainRollingMean)):
kneePainRollingMean["kneePainInsultIntensity"][i] = kneePainRollingMeanArray[i]
kneePainRollingMean = kneePainRollingMean.replace(0.0, 0.4)
thres2 = kneePain.copy()
thres2[:] = 1.1
for i in range(0, 300):
thres2["kneePain"][i] = 1.2
for i in range(810, len(thres2)):
thres2["kneePain"][i] = 1.8
envBrut = selectColumns(env, ["steps"])
betterMaxInsult = envMaxInsultDiff.copy()
scaler = MinMaxScaler()
betterMaxInsultArray = scaler.fit_transform(betterMaxInsult)
for i in range(0, len(betterMaxInsult)):
betterMaxInsult["stepsMaxInsultDiff"][i] = betterMaxInsultArray[i] + envBrut["steps"][i] + kneePainRollingMean["kneePainInsultIntensity"][i]
# Finding time points where knee pain and knee stress are above a certain threshold
painAboveThresh = getPainAboveThreshold(kneePain, "kneePain", 3.3)
painAboveThresh = selectColumns(painAboveThresh, ["kneePainThreshed"])
stepsMaxInsultDiffThresh = getInsultAboveThreshold(betterMaxInsult, "stepsMaxInsultDiff", thres2)
stepsMaxInsultDiffThresh = selectColumns(stepsMaxInsultDiffThresh, ["stepsMaxInsultDiffThreshed"])
# Plotting results
fig, axes = plt.subplots(nrows=3, ncols=1)
selectColumns(kneePain, ["kneePain"]).rename(columns={"kneePain": "knee pain"}).plot(ax=axes[0])
thres.rename(columns={"kneePain": "pain threshold"}).plot(ax=axes[0])
selectColumns(betterMaxInsult, ["stepsMaxInsultDiff"]).rename(columns={"stepsMaxInsultDiff": "knee stress"}).plot(ax=axes[1])
thres2.rename(columns={"kneePain": "knee stress threshold"}).plot(ax=axes[1])
painAboveThresh.rename(columns={"kneePainThreshed": "knee pain is above threshold"}).plot(ax=axes[2])
stepsMaxInsultDiffThresh = 0.95 * stepsMaxInsultDiffThresh
stepsMaxInsultDiffThresh.rename(columns={"stepsMaxInsultDiffThreshed": "knee stress is above threshold"}).plot(ax=axes[2])
leg = plt.legend(loc="best")
leg.set_draggable(True)
plt.show()
| 35.911111
| 144
| 0.775062
|
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dataFrameUtilities import addInsultIntensityColumns, getInsultAboveThreshold, getPainAboveThreshold, selectColumns,selectTime
from sklearn.preprocessing import MinMaxScaler
input = open("../data/preprocessed/preprocessedDataParticipant1.txt", "rb")
data = pickle.load(input)
input.close()
timeSelected = selectTime(data, "2016-09-01", "2019-10-20")
timeSelected["steps"] = timeSelected["steps"] - 37 * timeSelected["scooterRiding"]
timeSelected["steps"][timeSelected["steps"] < 0] = 0
kneePain = selectColumns(timeSelected, ["kneePain"])
thres = kneePain.copy()
thres[:] = 3.3
env = addInsultIntensityColumns(timeSelected, ["steps", "kneePain"], 21, 30)
envRollingMean = selectColumns(env, ["stepsInsultIntensity"])
envMaxInsultDiff = selectColumns(env, ["stepsMaxInsultDiff"])
kneePainRollingMean = selectColumns(env, ["kneePainInsultIntensity"])
kneePainRollingMean = kneePainRollingMean.replace(0, 0.4)
scaler = MinMaxScaler()
kneePainRollingMeanArray = scaler.fit_transform(kneePainRollingMean)
for i in range(0, len(kneePainRollingMean)):
kneePainRollingMean["kneePainInsultIntensity"][i] = kneePainRollingMeanArray[i]
kneePainRollingMean = kneePainRollingMean.replace(0.0, 0.4)
thres2 = kneePain.copy()
thres2[:] = 1.1
for i in range(0, 300):
thres2["kneePain"][i] = 1.2
for i in range(810, len(thres2)):
thres2["kneePain"][i] = 1.8
envBrut = selectColumns(env, ["steps"])
betterMaxInsult = envMaxInsultDiff.copy()
scaler = MinMaxScaler()
betterMaxInsultArray = scaler.fit_transform(betterMaxInsult)
for i in range(0, len(betterMaxInsult)):
betterMaxInsult["stepsMaxInsultDiff"][i] = betterMaxInsultArray[i] + envBrut["steps"][i] + kneePainRollingMean["kneePainInsultIntensity"][i]
painAboveThresh = getPainAboveThreshold(kneePain, "kneePain", 3.3)
painAboveThresh = selectColumns(painAboveThresh, ["kneePainThreshed"])
stepsMaxInsultDiffThresh = getInsultAboveThreshold(betterMaxInsult, "stepsMaxInsultDiff", thres2)
stepsMaxInsultDiffThresh = selectColumns(stepsMaxInsultDiffThresh, ["stepsMaxInsultDiffThreshed"])
fig, axes = plt.subplots(nrows=3, ncols=1)
selectColumns(kneePain, ["kneePain"]).rename(columns={"kneePain": "knee pain"}).plot(ax=axes[0])
thres.rename(columns={"kneePain": "pain threshold"}).plot(ax=axes[0])
selectColumns(betterMaxInsult, ["stepsMaxInsultDiff"]).rename(columns={"stepsMaxInsultDiff": "knee stress"}).plot(ax=axes[1])
thres2.rename(columns={"kneePain": "knee stress threshold"}).plot(ax=axes[1])
painAboveThresh.rename(columns={"kneePainThreshed": "knee pain is above threshold"}).plot(ax=axes[2])
stepsMaxInsultDiffThresh = 0.95 * stepsMaxInsultDiffThresh
stepsMaxInsultDiffThresh.rename(columns={"stepsMaxInsultDiffThreshed": "knee stress is above threshold"}).plot(ax=axes[2])
leg = plt.legend(loc="best")
leg.set_draggable(True)
plt.show()
| true
| true
|
f7056490fe820c5bc371a05eb7d52b47ad934ff3
| 146
|
py
|
Python
|
boa/_version.py
|
duncanmmacleod/boa
|
4a42cfd62b1e907c95737bb3079bbf626db62992
|
[
"BSD-3-Clause"
] | 4
|
2020-05-27T15:58:36.000Z
|
2020-05-28T20:50:42.000Z
|
boa/_version.py
|
wolfv/boa
|
a1be462ed015a47561c27c4e1ef4c0972095017d
|
[
"BSD-3-Clause"
] | null | null | null |
boa/_version.py
|
wolfv/boa
|
a1be462ed015a47561c27c4e1ef4c0972095017d
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2021, QuantStack
# SPDX-License-Identifier: BSD-3-Clause
version_info = (0, 7, 0)
__version__ = ".".join(map(str, version_info))
| 24.333333
| 46
| 0.705479
|
version_info = (0, 7, 0)
__version__ = ".".join(map(str, version_info))
| true
| true
|
f70564d7a09d116f125a022a0767e5a6e0d36386
| 3,260
|
py
|
Python
|
api/tests/test_auth_emailactivation.py
|
smegurus/smegurus-django
|
053973b5ff0b997c52bfaca8daf8e07db64a877c
|
[
"BSD-4-Clause"
] | 1
|
2020-07-16T10:58:23.000Z
|
2020-07-16T10:58:23.000Z
|
api/tests/test_auth_emailactivation.py
|
smegurus/smegurus-django
|
053973b5ff0b997c52bfaca8daf8e07db64a877c
|
[
"BSD-4-Clause"
] | 13
|
2018-11-30T02:29:39.000Z
|
2022-03-11T23:35:49.000Z
|
api/tests/test_auth_emailactivation.py
|
smegurus/smegurus-django
|
053973b5ff0b997c52bfaca8daf8e07db64a877c
|
[
"BSD-4-Clause"
] | null | null | null |
from django.core.urlresolvers import resolve, reverse
from django.db import transaction
from django.test import TestCase
from django.test import Client
from django.utils import translation
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate, login, logout
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from django_tenants.test.cases import TenantTestCase
from django_tenants.test.client import TenantClient
from smegurus import constants
TEST_USER_EMAIL = "ledo@gah.com"
TEST_USER_USERNAME = "ledo"
TEST_USER_PASSWORD = "GalacticAllianceOfHumankind"
class APIEmailActivationTestCase(APITestCase, TenantTestCase):
fixtures = []
def setup_tenant(self, tenant):
"""Public Schema"""
tenant.schema_name = 'test'
tenant.name = "Galactic Alliance of Humankind"
tenant.has_perks=True
tenant.has_mentors=True
tenant.how_discovered = "Command HQ"
tenant.how_many_served = 1
@classmethod
def setUpTestData(cls):
Group.objects.bulk_create([
Group(id=constants.ENTREPRENEUR_GROUP_ID, name="Entreprenuer",),
Group(id=constants.MENTOR_GROUP_ID, name="Mentor",),
Group(id=constants.ADVISOR_GROUP_ID, name="Advisor",),
Group(id=constants.ORGANIZATION_MANAGER_GROUP_ID, name="Org Manager",),
Group(id=constants.ORGANIZATION_ADMIN_GROUP_ID, name="Org Admin",),
Group(id=constants.CLIENT_MANAGER_GROUP_ID, name="Client Manager",),
Group(id=constants.SYSTEM_ADMIN_GROUP_ID, name="System Admin",),
])
user = User.objects.create_user( # Create our User.
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=TEST_USER_PASSWORD
)
user.is_active = True
user.save()
@transaction.atomic
def setUp(self):
translation.activate('en') # Set English
super(APIEmailActivationTestCase, self).setUp()
self.c = TenantClient(self.tenant)
@transaction.atomic
def tearDown(self):
users = User.objects.all()
for user in users.all():
user.delete()
# super(APIEmailActivationTestCase, self).tearDown()
@transaction.atomic
def test_api_send_activation(self):
url = reverse('api_emailactivation')
data = {
'email': TEST_USER_EMAIL,
}
response = self.c.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
from django.core import mail
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
# Verify that the subject of the first message is correct.
self.assertEqual(mail.outbox[0].subject, 'Den Activation')
@transaction.atomic
def test_api_send_activation_with_no_email(self):
url = reverse('api_emailactivation')
data = {
'email': 'whalesquid@hideauze.com',
}
response = self.c.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| 35.434783
| 83
| 0.68589
|
from django.core.urlresolvers import resolve, reverse
from django.db import transaction
from django.test import TestCase
from django.test import Client
from django.utils import translation
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate, login, logout
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from django_tenants.test.cases import TenantTestCase
from django_tenants.test.client import TenantClient
from smegurus import constants
TEST_USER_EMAIL = "ledo@gah.com"
TEST_USER_USERNAME = "ledo"
TEST_USER_PASSWORD = "GalacticAllianceOfHumankind"
class APIEmailActivationTestCase(APITestCase, TenantTestCase):
fixtures = []
def setup_tenant(self, tenant):
tenant.schema_name = 'test'
tenant.name = "Galactic Alliance of Humankind"
tenant.has_perks=True
tenant.has_mentors=True
tenant.how_discovered = "Command HQ"
tenant.how_many_served = 1
@classmethod
def setUpTestData(cls):
Group.objects.bulk_create([
Group(id=constants.ENTREPRENEUR_GROUP_ID, name="Entreprenuer",),
Group(id=constants.MENTOR_GROUP_ID, name="Mentor",),
Group(id=constants.ADVISOR_GROUP_ID, name="Advisor",),
Group(id=constants.ORGANIZATION_MANAGER_GROUP_ID, name="Org Manager",),
Group(id=constants.ORGANIZATION_ADMIN_GROUP_ID, name="Org Admin",),
Group(id=constants.CLIENT_MANAGER_GROUP_ID, name="Client Manager",),
Group(id=constants.SYSTEM_ADMIN_GROUP_ID, name="System Admin",),
])
user = User.objects.create_user(
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=TEST_USER_PASSWORD
)
user.is_active = True
user.save()
@transaction.atomic
def setUp(self):
translation.activate('en')
super(APIEmailActivationTestCase, self).setUp()
self.c = TenantClient(self.tenant)
@transaction.atomic
def tearDown(self):
users = User.objects.all()
for user in users.all():
user.delete()
@transaction.atomic
def test_api_send_activation(self):
url = reverse('api_emailactivation')
data = {
'email': TEST_USER_EMAIL,
}
response = self.c.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
from django.core import mail
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Den Activation')
@transaction.atomic
def test_api_send_activation_with_no_email(self):
url = reverse('api_emailactivation')
data = {
'email': 'whalesquid@hideauze.com',
}
response = self.c.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| true
| true
|
f70565606ba1ea85664abd157116b0df65dd6937
| 12,274
|
py
|
Python
|
wagtail/wagtailembeds/oembed_providers.py
|
seddonym/wagtail-tableblock
|
aea3ce67a0800285b20b93018b7c0a8679e479b7
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailembeds/oembed_providers.py
|
seddonym/wagtail-tableblock
|
aea3ce67a0800285b20b93018b7c0a8679e479b7
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailembeds/oembed_providers.py
|
seddonym/wagtail-tableblock
|
aea3ce67a0800285b20b93018b7c0a8679e479b7
|
[
"BSD-3-Clause"
] | null | null | null |
OEMBED_ENDPOINTS = {
"https://speakerdeck.com/oembed.{format}": [
"^http(?:s)?://speakerdeck\\.com/.+$"
],
"https://alpha-api.app.net/oembed": [
"^http(?:s)?://alpha\\.app\\.net/[^#?/]+/post/.+$",
"^http(?:s)?://photos\\.app\\.net/[^#?/]+/.+$"
],
"http://www.youtube.com/oembed": [
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/watch.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/v/.+$",
"^http(?:s)?://youtu\\.be/.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/user/.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/[^#?/]+#[^#?/]+/.+$",
"^http(?:s)?://m\\.youtube\\.com/index.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/profile.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/view_play_list.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/playlist.+$"
],
"http://backend.deviantart.com/oembed": [
"^http://(?:[-\\w]+\\.)?deviantart\\.com/art/.+$",
"^http://fav\\.me/.+$",
"^http://sta\\.sh/.+$",
"^http://(?:[-\\w]+\\.)?deviantart\\.com/[^#?/]+#/d.+$"
],
"http://blip.tv/oembed/": [
"^http://[-\\w]+\\.blip\\.tv/.+$"
],
"http://www.dailymotion.com/api/oembed/": [
"^http://[-\\w]+\\.dailymotion\\.com/.+$"
],
"http://www.flickr.com/services/oembed/": [
"^http://[-\\w]+\\.flickr\\.com/photos/.+$",
"^http://flic\\.kr\\.com/.+$"
],
"http://www.hulu.com/api/oembed.{format}": [
"^http://www\\.hulu\\.com/watch/.+$"
],
"http://www.nfb.ca/remote/services/oembed/": [
"^http://(?:[-\\w]+\\.)?nfb\\.ca/film/.+$"
],
"http://qik.com/api/oembed.{format}": [
"^http://qik\\.com/.+$",
"^http://qik\\.ly/.+$"
],
"http://revision3.com/api/oembed/": [
"^http://[-\\w]+\\.revision3\\.com/.+$"
],
"http://www.scribd.com/services/oembed": [
"^http://[-\\w]+\\.scribd\\.com/.+$"
],
"http://www.viddler.com/oembed/": [
"^http://[-\\w]+\\.viddler\\.com/v/.+$",
"^http://[-\\w]+\\.viddler\\.com/explore/.+$"
],
"http://www.vimeo.com/api/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?vimeo\\.com/.+$",
"^http(?:s)?://player\\.vimeo\\.com/.+$"
],
"http://dotsub.com/services/oembed": [
"^http://dotsub\\.com/view/.+$"
],
"http://www.yfrog.com/api/oembed": [
"^http(?:s)?://(?:www\\.)?yfrog\\.com/.+$",
"^http(?:s)?://(?:www\\.)?yfrog\\.us/.+$"
],
"http://clikthrough.com/services/oembed": [
"^http(?:s)?://(?:[-\\w]+\\.)?clikthrough\\.com/.+$"
],
"http://www.kinomap.com/oembed": [
"^http://[-\\w]+\\.kinomap\\.com/.+$"
],
"https://photobucket.com/oembed": [
"^http://(?:[-\\w]+\\.)?photobucket\\.com/albums/.+$",
"^http://(?:[-\\w]+\\.)?photobucket\\.com/groups/.+$"
],
"http://api.instagram.com/oembed": [
"^http://instagr\\.am/p/.+$",
"^http[s]?://instagram\\.com/p/.+$"
],
"https://www.slideshare.net/api/oembed/2": [
"^http://www\\.slideshare\\.net/.+$"
],
"http://tv.majorleaguegaming.com/oembed": [
"^http://mlg\\.tv/.+$",
"^http://tv\\.majorleaguegaming\\.com/.+$"
],
"http://my.opera.com/service/oembed": [
"^http://my\\.opera\\.com/.+$"
],
"http://skitch.com/oembed": [
"^http(?:s)?://(?:www\\.)?skitch\\.com/.+$",
"^http://skit\\.ch/.+$"
],
"https://api.twitter.com/1/statuses/oembed.{format}": [
"^http(?:s)?://twitter\\.com/(?:#!)?[^#?/]+/status/.+$"
],
"https://soundcloud.com/oembed": [
"^https://soundcloud\\.com/[^#?/]+/.+$"
],
"http://www.collegehumor.com/oembed.{format}": [
"^http://(?:www\\.)?collegehumor\\.com/video/.+$",
"^http://(?:www\\.)?collegehumor\\.com/video:.+$"
],
"http://www.polleverywhere.com/services/oembed/": [
"^http://www\\.polleverywhere\\.com/polls/.+$",
"^http://www\\.polleverywhere\\.com/multiple_choice_polls/.+$",
"^http://www\\.polleverywhere\\.com/free_text_polls/.+$"
],
"http://www.ifixit.com/Embed": [
"^http://www\\.ifixit\\.com/[^#?/]+/[^#?/]+/.+$"
],
"http://api.smugmug.com/services/oembed/": [
"^http(?:s)?://(?:www\\.)?smugmug\\.com/[^#?/]+/.+$"
],
"https://github.com/api/oembed": [
"^http(?:s)?://gist\\.github\\.com/.+$"
],
"http://animoto.com/services/oembed": [
"^http://animoto\\.com/play/.+$"
],
"http://www.rdio.com/api/oembed": [
"^http://(?:wwww\\.)?rdio\\.com/people/[^#?/]+/playlists/.+$",
"^http://[-\\w]+\\.rdio\\.com/artist/[^#?/]+/album/.+$"
],
"http://api.5min.com/oembed.{format}": [
"^http://www\\.5min\\.com/video/.+$"
],
"http://500px.com/photo/{1}/oembed.{format}": [
"^http://500px\\.com/photo/([^#?/]+)(?:.+)?$"
],
"http://api.dipdive.com/oembed.{format}": [
"^http://[-\\w]+\\.dipdive\\.com/media/.+$"
],
"http://video.yandex.ru/oembed.{format}": [
"^http://video\\.yandex\\.ru/users/[^#?/]+/view/.+$"
],
"http://www.mixcloud.com/oembed/": [
"^http://www\\.mixcloud\\.com/oembed/[^#?/]+/.+$"
],
"http://www.kickstarter.com/services/oembed": [
"^http(?:s)://[-\\w]+\\.kickstarter\\.com/projects/.+$"
],
"http://coub.com/api/oembed.{format}": [
"^http(?:s)?://coub\\.com/view/.+$",
"^http(?:s)?://coub\\.com/embed/.+$"
],
"http://www.screenr.com/api/oembed.{format}": [
"^http://www\\.screenr\\.com/.+$"
],
"http://www.funnyordie.com/oembed.{format}": [
"^http://www\\.funnyordie\\.com/videos/.+$"
],
"http://fast.wistia.com/oembed.{format}": [
"^http://[-\\w]+\\.wista\\.com/medias/.+$"
],
"http://www.ustream.tv/oembed": [
"^http(?:s)?://(?:www\\.)?ustream\\.tv/.+$",
"^http(?:s)?://(?:www\\.)?ustream\\.com/.+$",
"^http://ustre\\.am/.+$"
],
"http://wordpress.tv/oembed/": [
"^http://wordpress\\.tv/.+$"
],
"http://polldaddy.com/oembed/": [
"^http(?:s)?://(?:[-\\w]+\\.)?polldaddy\\.com/.+$"
],
"http://api.bambuser.com/oembed.{format}": [
"^http://bambuser\\.com/channel/[^#?/]+/broadcast/.+$",
"^http://bambuser\\.com/channel/.+$",
"^http://bambuser\\.com/v/.+$"
],
"http://www.ted.com/talks/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?ted\\.com/talks/.+$",
"^http(?:s)?://(?:www\\.)?ted\\.com/talks/lang/[^#?/]+/.+$",
"^http(?:s)?://(?:www\\.)?ted\\.com/index\\.php/talks/.+$",
"^http(?:s)?://(?:www\\.)?ted\\.com/index\\.php/talks/lang/[^#?/]+/.+$"
],
"http://chirb.it/oembed.{format}": [
"^http://chirb\\.it/.+$"
],
"https://www.circuitlab.com/circuit/oembed/": [
"^http(?:s)?://(?:www\\.)?circuitlab\\.com/circuit/.+$"
],
"http://api.geograph.org.uk/api/oembed": [
"^http://(?:[-\\w]+\\.)?geograph\\.org\\.uk/.+$",
"^http://(?:[-\\w]+\\.)?geograph\\.co\\.uk/.+$",
"^http://(?:[-\\w]+\\.)?geograph\\.ie/.+$"
],
"http://geo.hlipp.de/restapi.php/api/oembed": [
"^http://geo-en\\.hlipp\\.de/.+$",
"^http://geo\\.hlipp\\.de/.+$",
"^http://germany\\.geograph\\.org/.+$"
],
"http://www.geograph.org.gg/api/oembed": [
"^http://(?:[-\\w]+\\.)?geograph\\.org\\.gg/.+$",
"^http://(?:[-\\w]+\\.)?geograph\\.org\\.je/.+$",
"^http://channel-islands\\.geograph\\.org/.+$",
"^http://channel-islands\\.geographs\\.org/.+$",
"^http://(?:[-\\w]+\\.)?channel\\.geographs\\.org/.+$"
],
"http://vzaar.com/api/videos/{1}.{format}": [
"^http://(?:www\\.)?vzaar\\.com/videos/([^#?/]+)(?:.+)?$",
"^http://www\\.vzaar\\.tv/([^#?/]+)(?:.+)?$",
"^http://vzaar\\.tv/([^#?/]+)(?:.+)?$",
"^http://vzaar\\.me/([^#?/]+)(?:.+)?$",
"^http://[-\\w]+\\.vzaar\\.me/([^#?/]+)(?:.+)?$"
],
"http://api.minoto-video.com/services/oembed.{format}": [
"^http://api\\.minoto-video\\.com/publishers/[^#?/]+/videos/.+$",
"^http://dashboard\\.minoto-video\\.com/main/video/details/.+$",
"^http://embed\\.minoto-video\\.com/.+$"
],
"http://www.videojug.com/oembed.{format}": [
"^http(?:s)?://(?:[-\\w]+\\.)?videojug\\.com/film/.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?videojug\\.com/payer/.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?videojug\\.com/interview/.+$"
],
"http://videos.sapo.pt/oembed": [
"^http(?:s)?://videos\\.sapo\\.pt/.+$"
],
"http://vhx.tv/services/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?vhx\\.tv/.+$"
],
"http://api.justin.tv/api/embed/from_url.{format}": [
"^http(?:s)?://(?:www\\.)?justin\\.tv/.+$"
],
"http://official.fm/services/oembed.{format}": [
"^http(?:s)?://official\\.fm/.+$"
],
"http://huffduffer.com/oembed": [
"^http(?:s)?://(?:www\\.)?huffduffer\\.com/[^#?/]+/.+$"
],
"https://embed.spotify.com/oembed/": [
"^http(?:s)?://open\\.spotify\\.com/.+$",
"^http(?:s)?://spoti\\.fi/.+$"
],
"http://shoudio.com/api/oembed": [
"^http://shoudio\\.com/.+$",
"^http://shoud\\.io/.+$"
],
"http://api.mobypicture.com/oEmbed": [
"^http(?:s)?://(?:www\\.)?mobypicture\\.com/user/[^#?/]+/view/.+$",
"^http(?:s)?://(?:www\\.)?moby\\.to/.+$"
],
"http://www.23hq.com/23/oembed": [
"^http(?:s)?://(?:www\\.)?23hq\\.com/[^#?/]+/photo/.+$"
],
"http://gmep.org/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?gmep\\.org/.+$",
"^http(?:s)?://gmep\\.imeducate\\.com/.+$"
],
"http://oembed.urtak.com/1/oembed": [
"^http(?:s)?://(?:[-\\w]+\\.)?urtak\\.com/.+$"
],
"http://cacoo.com/oembed.{format}": [
"^http(?:s)?://cacoo\\.com/.+$"
],
"http://api.dailymile.com/oembed": [
"^http(?:s)?://(?:www\\.)?dailymile\\.com/people/[^#?/]+/entries/.+$"
],
"http://www.dipity.com/oembed/timeline/": [
"^http(?:s)?://(?:www\\.)?dipity\\.com/timeline/.+$",
"^http(?:s)?://(?:www\\.)?dipity\\.com/voaweb/.+$"
],
"https://sketchfab.com/oembed": [
"^http(?:s)?://sketchfab\\.com/show/.+$"
],
"https://api.meetup.com/oembed": [
"^http(?:s)?://(?:www\\.)?meetup\\.com/.+$",
"^http(?:s)?://(?:www\\.)?meetup\\.ps/.+$"
],
"https://roomshare.jp/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?roomshare\\.jp/(?:en/)?post/.+$"
],
"http://crowdranking.com/api/oembed.{format}": [
"^http(?:s)?://crowdranking\\.com/crowdrankings/.+$",
"^http(?:s)?://crowdranking\\.com/rankings/.+$",
"^http(?:s)?://crowdranking\\.com/topics/.+$",
"^http(?:s)?://crowdranking\\.com/widgets/.+$",
"^http(?:s)?://crowdranking\\.com/r/.+$"
],
"http://openapi.etsy.com/svc/oembed/": [
"^http(?:s)?://(?:www\\.)?etsy\\.com/listing/.+$"
],
"https://audioboo.fm/publishing/oembed.{format}": [
"^http(?:s)?://audioboo\\.fm/boos/.+$"
],
"http://demo.clikthrough.com/services/oembed/": [
"^http(?:s)?://demo\\.clikthrough\\.com/theater/video/.+$"
],
"http://www.ifttt.com/oembed/": [
"^http(?:s)?://ifttt\\.com/recipes/.+$"
],
# Added 11th December 2014 - http://developers.issuu.com/api/oembed.html
"http://issuu.com/oembed": [
"^http(?:s)?://(?:www\\.)?issuu\\.com/[^#?/]+/docs/.+$"
],
}
# Compile endpoints into regular expression objects
import re
def compile_endpoints():
endpoints = {}
for endpoint in OEMBED_ENDPOINTS.keys():
endpoint_key = endpoint.replace('{format}', 'json')
endpoints[endpoint_key] = []
for pattern in OEMBED_ENDPOINTS[endpoint]:
endpoints[endpoint_key].append(re.compile(pattern))
return endpoints
OEMBED_ENDPOINTS_COMPILED = compile_endpoints()
def get_oembed_provider(url):
for endpoint in OEMBED_ENDPOINTS_COMPILED.keys():
for pattern in OEMBED_ENDPOINTS_COMPILED[endpoint]:
if re.match(pattern, url):
return endpoint
return
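# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# get_oembed_provider() above matches a media URL against the compiled provider patterns and
# returns the corresponding oEmbed endpoint (with any "{format}" already replaced by "json"
# in compile_endpoints), or None when no provider matches. The URLs below are illustrative only.
#
#   get_oembed_provider("http://www.youtube.com/watch?v=dQw4w9WgXcQ")
#   # -> "http://www.youtube.com/oembed"
#   get_oembed_provider("https://speakerdeck.com/someone/some-talk")
#   # -> "https://speakerdeck.com/oembed.json"
#   get_oembed_provider("https://example.com/page")
#   # -> None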
| 37.420732
| 79
| 0.446309
|
OEMBED_ENDPOINTS = {
"https://speakerdeck.com/oembed.{format}": [
"^http(?:s)?://speakerdeck\\.com/.+$"
],
"https://alpha-api.app.net/oembed": [
"^http(?:s)?://alpha\\.app\\.net/[^#?/]+/post/.+$",
"^http(?:s)?://photos\\.app\\.net/[^#?/]+/.+$"
],
"http://www.youtube.com/oembed": [
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/watch.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/v/.+$",
"^http(?:s)?://youtu\\.be/.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/user/.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/[^#?/]+#[^#?/]+/.+$",
"^http(?:s)?://m\\.youtube\\.com/index.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/profile.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/view_play_list.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/playlist.+$"
],
"http://backend.deviantart.com/oembed": [
"^http://(?:[-\\w]+\\.)?deviantart\\.com/art/.+$",
"^http://fav\\.me/.+$",
"^http://sta\\.sh/.+$",
"^http://(?:[-\\w]+\\.)?deviantart\\.com/[^#?/]+#/d.+$"
],
"http://blip.tv/oembed/": [
"^http://[-\\w]+\\.blip\\.tv/.+$"
],
"http://www.dailymotion.com/api/oembed/": [
"^http://[-\\w]+\\.dailymotion\\.com/.+$"
],
"http://www.flickr.com/services/oembed/": [
"^http://[-\\w]+\\.flickr\\.com/photos/.+$",
"^http://flic\\.kr\\.com/.+$"
],
"http://www.hulu.com/api/oembed.{format}": [
"^http://www\\.hulu\\.com/watch/.+$"
],
"http://www.nfb.ca/remote/services/oembed/": [
"^http://(?:[-\\w]+\\.)?nfb\\.ca/film/.+$"
],
"http://qik.com/api/oembed.{format}": [
"^http://qik\\.com/.+$",
"^http://qik\\.ly/.+$"
],
"http://revision3.com/api/oembed/": [
"^http://[-\\w]+\\.revision3\\.com/.+$"
],
"http://www.scribd.com/services/oembed": [
"^http://[-\\w]+\\.scribd\\.com/.+$"
],
"http://www.viddler.com/oembed/": [
"^http://[-\\w]+\\.viddler\\.com/v/.+$",
"^http://[-\\w]+\\.viddler\\.com/explore/.+$"
],
"http://www.vimeo.com/api/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?vimeo\\.com/.+$",
"^http(?:s)?://player\\.vimeo\\.com/.+$"
],
"http://dotsub.com/services/oembed": [
"^http://dotsub\\.com/view/.+$"
],
"http://www.yfrog.com/api/oembed": [
"^http(?:s)?://(?:www\\.)?yfrog\\.com/.+$",
"^http(?:s)?://(?:www\\.)?yfrog\\.us/.+$"
],
"http://clikthrough.com/services/oembed": [
"^http(?:s)?://(?:[-\\w]+\\.)?clikthrough\\.com/.+$"
],
"http://www.kinomap.com/oembed": [
"^http://[-\\w]+\\.kinomap\\.com/.+$"
],
"https://photobucket.com/oembed": [
"^http://(?:[-\\w]+\\.)?photobucket\\.com/albums/.+$",
"^http://(?:[-\\w]+\\.)?photobucket\\.com/groups/.+$"
],
"http://api.instagram.com/oembed": [
"^http://instagr\\.am/p/.+$",
"^http[s]?://instagram\\.com/p/.+$"
],
"https://www.slideshare.net/api/oembed/2": [
"^http://www\\.slideshare\\.net/.+$"
],
"http://tv.majorleaguegaming.com/oembed": [
"^http://mlg\\.tv/.+$",
"^http://tv\\.majorleaguegaming\\.com/.+$"
],
"http://my.opera.com/service/oembed": [
"^http://my\\.opera\\.com/.+$"
],
"http://skitch.com/oembed": [
"^http(?:s)?://(?:www\\.)?skitch\\.com/.+$",
"^http://skit\\.ch/.+$"
],
"https://api.twitter.com/1/statuses/oembed.{format}": [
"^http(?:s)?://twitter\\.com/(?:#!)?[^#?/]+/status/.+$"
],
"https://soundcloud.com/oembed": [
"^https://soundcloud\\.com/[^#?/]+/.+$"
],
"http://www.collegehumor.com/oembed.{format}": [
"^http://(?:www\\.)?collegehumor\\.com/video/.+$",
"^http://(?:www\\.)?collegehumor\\.com/video:.+$"
],
"http://www.polleverywhere.com/services/oembed/": [
"^http://www\\.polleverywhere\\.com/polls/.+$",
"^http://www\\.polleverywhere\\.com/multiple_choice_polls/.+$",
"^http://www\\.polleverywhere\\.com/free_text_polls/.+$"
],
"http://www.ifixit.com/Embed": [
"^http://www\\.ifixit\\.com/[^#?/]+/[^#?/]+/.+$"
],
"http://api.smugmug.com/services/oembed/": [
"^http(?:s)?://(?:www\\.)?smugmug\\.com/[^#?/]+/.+$"
],
"https://github.com/api/oembed": [
"^http(?:s)?://gist\\.github\\.com/.+$"
],
"http://animoto.com/services/oembed": [
"^http://animoto\\.com/play/.+$"
],
"http://www.rdio.com/api/oembed": [
"^http://(?:wwww\\.)?rdio\\.com/people/[^#?/]+/playlists/.+$",
"^http://[-\\w]+\\.rdio\\.com/artist/[^#?/]+/album/.+$"
],
"http://api.5min.com/oembed.{format}": [
"^http://www\\.5min\\.com/video/.+$"
],
"http://500px.com/photo/{1}/oembed.{format}": [
"^http://500px\\.com/photo/([^#?/]+)(?:.+)?$"
],
"http://api.dipdive.com/oembed.{format}": [
"^http://[-\\w]+\\.dipdive\\.com/media/.+$"
],
"http://video.yandex.ru/oembed.{format}": [
"^http://video\\.yandex\\.ru/users/[^#?/]+/view/.+$"
],
"http://www.mixcloud.com/oembed/": [
"^http://www\\.mixcloud\\.com/oembed/[^#?/]+/.+$"
],
"http://www.kickstarter.com/services/oembed": [
"^http(?:s)://[-\\w]+\\.kickstarter\\.com/projects/.+$"
],
"http://coub.com/api/oembed.{format}": [
"^http(?:s)?://coub\\.com/view/.+$",
"^http(?:s)?://coub\\.com/embed/.+$"
],
"http://www.screenr.com/api/oembed.{format}": [
"^http://www\\.screenr\\.com/.+$"
],
"http://www.funnyordie.com/oembed.{format}": [
"^http://www\\.funnyordie\\.com/videos/.+$"
],
"http://fast.wistia.com/oembed.{format}": [
"^http://[-\\w]+\\.wista\\.com/medias/.+$"
],
"http://www.ustream.tv/oembed": [
"^http(?:s)?://(?:www\\.)?ustream\\.tv/.+$",
"^http(?:s)?://(?:www\\.)?ustream\\.com/.+$",
"^http://ustre\\.am/.+$"
],
"http://wordpress.tv/oembed/": [
"^http://wordpress\\.tv/.+$"
],
"http://polldaddy.com/oembed/": [
"^http(?:s)?://(?:[-\\w]+\\.)?polldaddy\\.com/.+$"
],
"http://api.bambuser.com/oembed.{format}": [
"^http://bambuser\\.com/channel/[^#?/]+/broadcast/.+$",
"^http://bambuser\\.com/channel/.+$",
"^http://bambuser\\.com/v/.+$"
],
"http://www.ted.com/talks/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?ted\\.com/talks/.+$",
"^http(?:s)?://(?:www\\.)?ted\\.com/talks/lang/[^#?/]+/.+$",
"^http(?:s)?://(?:www\\.)?ted\\.com/index\\.php/talks/.+$",
"^http(?:s)?://(?:www\\.)?ted\\.com/index\\.php/talks/lang/[^#?/]+/.+$"
],
"http://chirb.it/oembed.{format}": [
"^http://chirb\\.it/.+$"
],
"https://www.circuitlab.com/circuit/oembed/": [
"^http(?:s)?://(?:www\\.)?circuitlab\\.com/circuit/.+$"
],
"http://api.geograph.org.uk/api/oembed": [
"^http://(?:[-\\w]+\\.)?geograph\\.org\\.uk/.+$",
"^http://(?:[-\\w]+\\.)?geograph\\.co\\.uk/.+$",
"^http://(?:[-\\w]+\\.)?geograph\\.ie/.+$"
],
"http://geo.hlipp.de/restapi.php/api/oembed": [
"^http://geo-en\\.hlipp\\.de/.+$",
"^http://geo\\.hlipp\\.de/.+$",
"^http://germany\\.geograph\\.org/.+$"
],
"http://www.geograph.org.gg/api/oembed": [
"^http://(?:[-\\w]+\\.)?geograph\\.org\\.gg/.+$",
"^http://(?:[-\\w]+\\.)?geograph\\.org\\.je/.+$",
"^http://channel-islands\\.geograph\\.org/.+$",
"^http://channel-islands\\.geographs\\.org/.+$",
"^http://(?:[-\\w]+\\.)?channel\\.geographs\\.org/.+$"
],
"http://vzaar.com/api/videos/{1}.{format}": [
"^http://(?:www\\.)?vzaar\\.com/videos/([^#?/]+)(?:.+)?$",
"^http://www\\.vzaar\\.tv/([^#?/]+)(?:.+)?$",
"^http://vzaar\\.tv/([^#?/]+)(?:.+)?$",
"^http://vzaar\\.me/([^#?/]+)(?:.+)?$",
"^http://[-\\w]+\\.vzaar\\.me/([^#?/]+)(?:.+)?$"
],
"http://api.minoto-video.com/services/oembed.{format}": [
"^http://api\\.minoto-video\\.com/publishers/[^#?/]+/videos/.+$",
"^http://dashboard\\.minoto-video\\.com/main/video/details/.+$",
"^http://embed\\.minoto-video\\.com/.+$"
],
"http://www.videojug.com/oembed.{format}": [
"^http(?:s)?://(?:[-\\w]+\\.)?videojug\\.com/film/.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?videojug\\.com/payer/.+$",
"^http(?:s)?://(?:[-\\w]+\\.)?videojug\\.com/interview/.+$"
],
"http://videos.sapo.pt/oembed": [
"^http(?:s)?://videos\\.sapo\\.pt/.+$"
],
"http://vhx.tv/services/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?vhx\\.tv/.+$"
],
"http://api.justin.tv/api/embed/from_url.{format}": [
"^http(?:s)?://(?:www\\.)?justin\\.tv/.+$"
],
"http://official.fm/services/oembed.{format}": [
"^http(?:s)?://official\\.fm/.+$"
],
"http://huffduffer.com/oembed": [
"^http(?:s)?://(?:www\\.)?huffduffer\\.com/[^#?/]+/.+$"
],
"https://embed.spotify.com/oembed/": [
"^http(?:s)?://open\\.spotify\\.com/.+$",
"^http(?:s)?://spoti\\.fi/.+$"
],
"http://shoudio.com/api/oembed": [
"^http://shoudio\\.com/.+$",
"^http://shoud\\.io/.+$"
],
"http://api.mobypicture.com/oEmbed": [
"^http(?:s)?://(?:www\\.)?mobypicture\\.com/user/[^#?/]+/view/.+$",
"^http(?:s)?://(?:www\\.)?moby\\.to/.+$"
],
"http://www.23hq.com/23/oembed": [
"^http(?:s)?://(?:www\\.)?23hq\\.com/[^#?/]+/photo/.+$"
],
"http://gmep.org/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?gmep\\.org/.+$",
"^http(?:s)?://gmep\\.imeducate\\.com/.+$"
],
"http://oembed.urtak.com/1/oembed": [
"^http(?:s)?://(?:[-\\w]+\\.)?urtak\\.com/.+$"
],
"http://cacoo.com/oembed.{format}": [
"^http(?:s)?://cacoo\\.com/.+$"
],
"http://api.dailymile.com/oembed": [
"^http(?:s)?://(?:www\\.)?dailymile\\.com/people/[^#?/]+/entries/.+$"
],
"http://www.dipity.com/oembed/timeline/": [
"^http(?:s)?://(?:www\\.)?dipity\\.com/timeline/.+$",
"^http(?:s)?://(?:www\\.)?dipity\\.com/voaweb/.+$"
],
"https://sketchfab.com/oembed": [
"^http(?:s)?://sketchfab\\.com/show/.+$"
],
"https://api.meetup.com/oembed": [
"^http(?:s)?://(?:www\\.)?meetup\\.com/.+$",
"^http(?:s)?://(?:www\\.)?meetup\\.ps/.+$"
],
"https://roomshare.jp/oembed.{format}": [
"^http(?:s)?://(?:www\\.)?roomshare\\.jp/(?:en/)?post/.+$"
],
"http://crowdranking.com/api/oembed.{format}": [
"^http(?:s)?://crowdranking\\.com/crowdrankings/.+$",
"^http(?:s)?://crowdranking\\.com/rankings/.+$",
"^http(?:s)?://crowdranking\\.com/topics/.+$",
"^http(?:s)?://crowdranking\\.com/widgets/.+$",
"^http(?:s)?://crowdranking\\.com/r/.+$"
],
"http://openapi.etsy.com/svc/oembed/": [
"^http(?:s)?://(?:www\\.)?etsy\\.com/listing/.+$"
],
"https://audioboo.fm/publishing/oembed.{format}": [
"^http(?:s)?://audioboo\\.fm/boos/.+$"
],
"http://demo.clikthrough.com/services/oembed/": [
"^http(?:s)?://demo\\.clikthrough\\.com/theater/video/.+$"
],
"http://www.ifttt.com/oembed/": [
"^http(?:s)?://ifttt\\.com/recipes/.+$"
],
"http://issuu.com/oembed": [
"^http(?:s)?://(?:www\\.)?issuu\\.com/[^#?/]+/docs/.+$"
],
}
import re
def compile_endpoints():
endpoints = {}
for endpoint in OEMBED_ENDPOINTS.keys():
endpoint_key = endpoint.replace('{format}', 'json')
endpoints[endpoint_key] = []
for pattern in OEMBED_ENDPOINTS[endpoint]:
endpoints[endpoint_key].append(re.compile(pattern))
return endpoints
OEMBED_ENDPOINTS_COMPILED = compile_endpoints()
def get_oembed_provider(url):
for endpoint in OEMBED_ENDPOINTS_COMPILED.keys():
for pattern in OEMBED_ENDPOINTS_COMPILED[endpoint]:
if re.match(pattern, url):
return endpoint
return
| true
| true
|
f70565f8f17d23fbb5f86778bb299941633d2717
| 41,785
|
py
|
Python
|
discord/commands/commands.py
|
ThatGenZGamer48/Texus
|
96cdee4544f3bbb873620ba7a8926d6f7dc5a672
|
[
"MIT"
] | null | null | null |
discord/commands/commands.py
|
ThatGenZGamer48/Texus
|
96cdee4544f3bbb873620ba7a8926d6f7dc5a672
|
[
"MIT"
] | null | null | null |
discord/commands/commands.py
|
ThatGenZGamer48/Texus
|
96cdee4544f3bbb873620ba7a8926d6f7dc5a672
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-2021 Pycord Development
Copyright (c) 2021-present Texus
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import types
import functools
import inspect
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Union, TYPE_CHECKING
from ..enums import SlashCommandOptionType, ChannelType
from ..member import Member
from ..user import User
from ..message import Message
from .context import ApplicationContext, AutocompleteContext
from ..utils import find, get_or_fetch, async_all
from ..errors import ValidationError, ClientException
from .errors import ApplicationCommandError, CheckFailure, ApplicationCommandInvokeError
from .permissions import Permission
__all__ = (
"_BaseCommand",
"ApplicationCommand",
"SlashCommand",
"Option",
"OptionChoice",
"option",
"slash_command",
"application_command",
"user_command",
"message_command",
"command",
"SlashCommandGroup",
"ContextMenuCommand",
"UserCommand",
"MessageCommand",
)
if TYPE_CHECKING:
from ..interactions import Interaction
def wrap_callback(coro):
@functools.wraps(coro)
async def wrapped(*args, **kwargs):
try:
ret = await coro(*args, **kwargs)
except ApplicationCommandError:
raise
except asyncio.CancelledError:
return
except Exception as exc:
raise ApplicationCommandInvokeError(exc) from exc
return ret
return wrapped
def hooked_wrapped_callback(command, ctx, coro):
@functools.wraps(coro)
async def wrapped(arg):
try:
ret = await coro(arg)
except ApplicationCommandError:
raise
except asyncio.CancelledError:
return
except Exception as exc:
raise ApplicationCommandInvokeError(exc) from exc
finally:
await command.call_after_hooks(ctx)
return ret
return wrapped
class _BaseCommand:
__slots__ = ()
class ApplicationCommand(_BaseCommand):
cog = None
def __repr__(self):
return f"<discord.commands.{self.__class__.__name__} name={self.name}>"
def __eq__(self, other):
return isinstance(other, self.__class__)
async def __call__(self, ctx, *args, **kwargs):
"""|coro|
Calls the command's callback.
This method bypasses all checks that a command has and does not
convert the arguments beforehand, so take care to pass the correct
arguments in.
"""
return await self.callback(ctx, *args, **kwargs)
async def prepare(self, ctx: ApplicationContext) -> None:
# This should be same across all 3 types
ctx.command = self
if not await self.can_run(ctx):
raise CheckFailure(
f"The check functions for the command {self.name} failed"
)
# TODO: Add cooldown
await self.call_before_hooks(ctx)
async def invoke(self, ctx: ApplicationContext) -> None:
await self.prepare(ctx)
injected = hooked_wrapped_callback(self, ctx, self._invoke)
await injected(ctx)
async def can_run(self, ctx: ApplicationContext) -> bool:
if not await ctx.bot.can_run(ctx):
raise CheckFailure(
f"The global check functions for command {self.name} failed."
)
predicates = self.checks
if not predicates:
# since we have no checks, then we just return True.
return True
return await async_all(predicate(ctx) for predicate in predicates) # type: ignore
async def dispatch_error(self, ctx: ApplicationContext, error: Exception) -> None:
ctx.command_failed = True
cog = self.cog
try:
coro = self.on_error
except AttributeError:
pass
else:
injected = wrap_callback(coro)
if cog is not None:
await injected(cog, ctx, error)
else:
await injected(ctx, error)
try:
if cog is not None:
local = cog.__class__._get_overridden_method(cog.cog_command_error)
if local is not None:
wrapped = wrap_callback(local)
await wrapped(ctx, error)
finally:
ctx.bot.dispatch("application_command_error", ctx, error)
def _get_signature_parameters(self):
return OrderedDict(inspect.signature(self.callback).parameters)
def error(self, coro):
"""A decorator that registers a coroutine as a local error handler.
A local error handler is an :func:`.on_command_error` event limited to
a single command. However, the :func:`.on_command_error` is still
invoked afterwards as the catch-all.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the local error handler.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The error handler must be a coroutine.")
self.on_error = coro
return coro
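    # Editor's note: hedged usage sketch, not part of the original source. It
    # shows how a per-command error handler could be registered with the
    # ``error`` decorator documented above. The bot instance ``bot`` and the
    # command name ``greet`` are illustrative assumptions.
    #
    #     @bot.slash_command(name="greet")
    #     async def greet(ctx):
    #         await ctx.respond("hi")
    #
    #     @greet.error
    #     async def greet_error(ctx, error):
    #         # runs before the global application command error event
    #         await ctx.respond("Something went wrong.")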
def has_error_handler(self) -> bool:
""":class:`bool`: Checks whether the command has an error handler registered."""
return hasattr(self, "on_error")
def before_invoke(self, coro):
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
See :meth:`.Bot.before_invoke` for more info.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the pre-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The pre-invoke hook must be a coroutine.")
self._before_invoke = coro
return coro
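    # Editor's note: hedged sketch of a pre-invoke hook, assuming a command
    # object named ``greet`` created elsewhere with ``bot.slash_command`` and
    # a hypothetical ``acquire_connection`` helper.
    #
    #     @greet.before_invoke
    #     async def open_db(ctx):
    #         ctx.db = await acquire_connection()  # hypothetical helper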
def after_invoke(self, coro):
"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
See :meth:`.Bot.after_invoke` for more info.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the post-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The post-invoke hook must be a coroutine.")
self._after_invoke = coro
return coro
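    # Editor's note: hedged sketch of the matching post-invoke hook; the
    # ``greet`` command and the ``ctx.db`` attribute are the same illustrative
    # assumptions used in the pre-invoke sketch above.
    #
    #     @greet.after_invoke
    #     async def close_db(ctx):
    #         await ctx.db.close()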
async def call_before_hooks(self, ctx: ApplicationContext) -> None:
# now that we're done preparing we can call the pre-command hooks
# first, call the command local hook:
cog = self.cog
if self._before_invoke is not None:
# should be cog if @commands.before_invoke is used
instance = getattr(self._before_invoke, "__self__", cog)
# __self__ only exists for methods, not functions
# however, if @command.before_invoke is used, it will be a function
if instance:
await self._before_invoke(instance, ctx) # type: ignore
else:
await self._before_invoke(ctx) # type: ignore
# call the cog local hook if applicable:
if cog is not None:
hook = cog.__class__._get_overridden_method(cog.cog_before_invoke)
if hook is not None:
await hook(ctx)
# call the bot global hook if necessary
hook = ctx.bot._before_invoke
if hook is not None:
await hook(ctx)
async def call_after_hooks(self, ctx: ApplicationContext) -> None:
cog = self.cog
if self._after_invoke is not None:
instance = getattr(self._after_invoke, "__self__", cog)
if instance:
await self._after_invoke(instance, ctx) # type: ignore
else:
await self._after_invoke(ctx) # type: ignore
# call the cog local hook if applicable:
if cog is not None:
hook = cog.__class__._get_overridden_method(cog.cog_after_invoke)
if hook is not None:
await hook(ctx)
hook = ctx.bot._after_invoke
if hook is not None:
await hook(ctx)
@property
def full_parent_name(self) -> str:
""":class:`str`: Retrieves the fully qualified parent command name.
        This is the base command name required to execute it. For example,
in ``/one two three`` the parent name would be ``one two``.
"""
entries = []
command = self
while command.parent is not None and hasattr(command.parent, "name"):
command = command.parent
entries.append(command.name)
return " ".join(reversed(entries))
def qualified_name(self) -> str:
""":class:`str`: Retrieves the fully qualified command name.
This is the full parent name with the command name as well.
For example, in ``/one two three`` the qualified name would be
``one two three``.
"""
parent = self.full_parent_name
if parent:
return parent + " " + self.name
else:
return self.name
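    # Editor's note: illustrative sketch of the two name helpers above. For a
    # subcommand registered as ``/one two three`` (note that in this snapshot
    # ``qualified_name`` is a plain method, not a property):
    #
    #     cmd.full_parent_name   # -> "one two"
    #     cmd.qualified_name()   # -> "one two three"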
class SlashCommand(ApplicationCommand):
r"""A class that implements the protocol for a slash command.
    These are not created manually; instead, they are created via the
    decorator or functional interface.
Attributes
-----------
name: :class:`str`
The name of the command.
callback: :ref:`coroutine <coroutine>`
The coroutine that is executed when the command is called.
description: Optional[:class:`str`]
The description for the command.
guild_ids: Optional[List[:class:`int`]]
The ids of the guilds where this command will be registered.
options: List[:class:`Option`]
The parameters for this command.
parent: Optional[:class:`SlashCommandGroup`]
The parent group that this command belongs to. ``None`` if there
isn't one.
default_permission: :class:`bool`
Whether the command is enabled by default when it is added to a guild.
permissions: List[:class:`Permission`]
The permissions for this command.
.. note::
If this is not empty then default_permissions will be set to False.
cog: Optional[:class:`Cog`]
The cog that this command belongs to. ``None`` if there isn't one.
checks: List[Callable[[:class:`.ApplicationContext`], :class:`bool`]]
A list of predicates that verifies if the command could be executed
with the given :class:`.ApplicationContext` as the sole parameter. If an exception
is necessary to be thrown to signal failure, then one inherited from
:exc:`.CommandError` should be used. Note that if the checks fail then
:exc:`.CheckFailure` exception is raised to the :func:`.on_application_command_error`
event.
"""
type = 1
def __new__(cls, *args, **kwargs) -> SlashCommand:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
def __init__(self, func: Callable, *args, **kwargs) -> None:
if not asyncio.iscoroutinefunction(func):
raise TypeError("Callback must be a coroutine.")
self.callback = func
self.guild_ids: Optional[List[int]] = kwargs.get("guild_ids", None)
name = kwargs.get("name") or func.__name__
validate_chat_input_name(name)
self.name: str = name
self.id = None
description = kwargs.get("description") or (
inspect.cleandoc(func.__doc__).splitlines()[0]
if func.__doc__ is not None
else "No description provided"
)
validate_chat_input_description(description)
self.description: str = description
self.parent = kwargs.get("parent")
self.is_subcommand: bool = self.parent is not None
self.cog = None
params = self._get_signature_parameters()
self.options: List[Option] = kwargs.get("options") or self._parse_options(
params
)
try:
checks = func.__commands_checks__
checks.reverse()
except AttributeError:
checks = kwargs.get("checks", [])
self.checks = checks
self._before_invoke = None
self._after_invoke = None
# Permissions
self.default_permission = kwargs.get("default_permission", True)
self.permissions: List[Permission] = getattr(
func, "__app_cmd_perms__", []
) + kwargs.get("permissions", [])
if self.permissions and self.default_permission:
self.default_permission = False
def _parse_options(self, params) -> List[Option]:
final_options = []
if list(params.items())[0][0] == "self":
temp = list(params.items())
temp.pop(0)
params = dict(temp)
params = iter(params.items())
# next we have the 'ctx' as the next parameter
try:
next(params)
except StopIteration:
raise ClientException(
f'Callback for {self.name} command is missing "ctx" parameter.'
)
final_options = []
for p_name, p_obj in params:
option = p_obj.annotation
if option == inspect.Parameter.empty:
option = str
if self._is_typing_union(option):
if self._is_typing_optional(option):
option = Option(
option.__args__[0], "No description provided", required=False
)
else:
option = Option(option.__args__, "No description provided")
if not isinstance(option, Option):
option = Option(option, "No description provided")
if p_obj.default != inspect.Parameter.empty:
option.required = False
option.default = (
option.default if option.default is not None else p_obj.default
)
if option.default == inspect.Parameter.empty:
option.default = None
if option.name is None:
option.name = p_name
option._parameter_name = p_name
final_options.append(option)
return final_options
def _is_typing_union(self, annotation):
return getattr(annotation, "__origin__", None) is Union or type(
annotation
) is getattr(
types, "UnionType", Union
) # type: ignore
def _is_typing_optional(self, annotation):
return self._is_typing_union(annotation) and type(None) in annotation.__args__ # type: ignore
def to_dict(self) -> Dict:
as_dict = {
"name": self.name,
"description": self.description,
"options": [o.to_dict() for o in self.options],
"default_permission": self.default_permission,
}
if self.is_subcommand:
as_dict["type"] = SlashCommandOptionType.sub_command.value
return as_dict
def __eq__(self, other) -> bool:
return (
isinstance(other, SlashCommand)
and other.name == self.name
and other.description == self.description
)
async def _invoke(self, ctx: ApplicationContext) -> None:
# TODO: Parse the args better
kwargs = {}
for arg in ctx.interaction.data.get("options", []):
op = find(lambda x: x.name == arg["name"], self.options)
arg = arg["value"]
# Checks if input_type is user, role or channel
if (
SlashCommandOptionType.user.value
<= op.input_type.value
<= SlashCommandOptionType.role.value
):
name = "member" if op.input_type.name == "user" else op.input_type.name
arg = await get_or_fetch(ctx.guild, name, int(arg), default=int(arg))
elif op.input_type == SlashCommandOptionType.mentionable:
arg_id = int(arg)
arg = await get_or_fetch(ctx.guild, "member", arg_id)
if arg is None:
arg = ctx.guild.get_role(arg_id) or arg_id
elif (
op.input_type == SlashCommandOptionType.string
and op._converter is not None
):
arg = await op._converter.convert(ctx, arg)
kwargs[op._parameter_name] = arg
for o in self.options:
if o._parameter_name not in kwargs:
kwargs[o._parameter_name] = o.default
if self.cog is not None:
await self.callback(self.cog, ctx, **kwargs)
else:
await self.callback(ctx, **kwargs)
async def invoke_autocomplete_callback(self, ctx: AutocompleteContext):
values = {i.name: i.default for i in self.options}
for op in ctx.interaction.data.get("options", []):
if op.get("focused", False):
option = find(lambda o: o.name == op["name"], self.options)
values.update(
{i["name"]: i["value"] for i in ctx.interaction.data["options"]}
)
ctx.command = self
ctx.focused = option
ctx.value = op.get("value")
ctx.options = values
if len(inspect.signature(option.autocomplete).parameters) == 2:
instance = getattr(option.autocomplete, "__self__", ctx.cog)
result = option.autocomplete(instance, ctx)
else:
result = option.autocomplete(ctx)
if asyncio.iscoroutinefunction(option.autocomplete):
result = await result
choices = [
o if isinstance(o, OptionChoice) else OptionChoice(o)
for o in result
][:25]
return await ctx.interaction.response.send_autocomplete_result(
choices=choices
)
def copy(self):
"""Creates a copy of this command.
Returns
--------
:class:`SlashCommand`
A new instance of this command.
"""
ret = self.__class__(self.callback, **self.__original_kwargs__)
return self._ensure_assignment_on_copy(ret)
def _ensure_assignment_on_copy(self, other):
other._before_invoke = self._before_invoke
other._after_invoke = self._after_invoke
if self.checks != other.checks:
other.checks = self.checks.copy()
# if self._buckets.valid and not other._buckets.valid:
# other._buckets = self._buckets.copy()
# if self._max_concurrency != other._max_concurrency:
# # _max_concurrency won't be None at this point
# other._max_concurrency = self._max_concurrency.copy() # type: ignore
try:
other.on_error = self.on_error
except AttributeError:
pass
return other
def _update_copy(self, kwargs: Dict[str, Any]):
if kwargs:
kw = kwargs.copy()
kw.update(self.__original_kwargs__)
copy = self.__class__(self.callback, **kw)
return self._ensure_assignment_on_copy(copy)
else:
return self.copy()
channel_type_map = {
"TextChannel": ChannelType.text,
"VoiceChannel": ChannelType.voice,
"StageChannel": ChannelType.stage_voice,
"CategoryChannel": ChannelType.category,
}
class Option:
def __init__(self, input_type: Any, /, description: str = None, **kwargs) -> None:
self.name: Optional[str] = kwargs.pop("name", None)
self.description = description or "No description provided"
self._converter = None
self.channel_types: List[SlashCommandOptionType] = kwargs.pop(
"channel_types", []
)
if not isinstance(input_type, SlashCommandOptionType):
if hasattr(input_type, "convert"):
self._converter = input_type
input_type = SlashCommandOptionType.string
else:
_type = SlashCommandOptionType.from_datatype(input_type)
if _type == SlashCommandOptionType.channel:
if not isinstance(input_type, tuple):
input_type = (input_type,)
for i in input_type:
if i.__name__ == "GuildChannel":
continue
channel_type = channel_type_map[i.__name__]
self.channel_types.append(channel_type)
input_type = _type
self.input_type = input_type
self.required: bool = kwargs.pop("required", True)
self.choices: List[OptionChoice] = [
o if isinstance(o, OptionChoice) else OptionChoice(o)
for o in kwargs.pop("choices", list())
]
self.default = kwargs.pop("default", None)
if self.input_type == SlashCommandOptionType.integer:
minmax_types = (int, type(None))
elif self.input_type == SlashCommandOptionType.number:
minmax_types = (int, float, type(None))
else:
minmax_types = (type(None),)
minmax_typehint = Optional[Union[minmax_types]] # type: ignore
self.min_value: minmax_typehint = kwargs.pop("min_value", None)
self.max_value: minmax_typehint = kwargs.pop("max_value", None)
if not (isinstance(self.min_value, minmax_types) or self.min_value is None):
raise TypeError(
f'Expected {minmax_typehint} for min_value, got "{type(self.min_value).__name__}"'
)
        if not (isinstance(self.max_value, minmax_types) or self.max_value is None):
raise TypeError(
f'Expected {minmax_typehint} for max_value, got "{type(self.max_value).__name__}"'
)
self.autocomplete = kwargs.pop("autocomplete", None)
def to_dict(self) -> Dict:
as_dict = {
"name": self.name,
"description": self.description,
"type": self.input_type.value,
"required": self.required,
"choices": [c.to_dict() for c in self.choices],
"autocomplete": bool(self.autocomplete),
}
if self.channel_types:
as_dict["channel_types"] = [t.value for t in self.channel_types]
if self.min_value is not None:
as_dict["min_value"] = self.min_value
if self.max_value is not None:
as_dict["max_value"] = self.max_value
return as_dict
def __repr__(self):
return f"<discord.commands.{self.__class__.__name__} name={self.name}>"
class OptionChoice:
def __init__(self, name: str, value: Optional[Union[str, int, float]] = None):
self.name = name
self.value = value or name
def to_dict(self) -> Dict[str, Union[str, int, float]]:
return {"name": self.name, "value": self.value}
def option(name, type=None, **kwargs):
"""A decorator that can be used instead of typehinting Option"""
def decor(func):
nonlocal type
type = type or func.__annotations__.get(name, str)
func.__annotations__[name] = Option(type, **kwargs)
return func
return decor
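# Editor's note: hedged sketch of the ``option`` decorator defined above, used
# as an alternative to annotating the parameter with Option directly. The
# ``bot`` instance is an illustrative assumption.
#
#     @bot.slash_command()
#     @option("volume", int, description="Playback volume",
#             min_value=0, max_value=100)
#     async def volume(ctx, volume):
#         await ctx.respond(f"Volume set to {volume}")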
class SlashCommandGroup(ApplicationCommand, Option):
r"""A class that implements the protocol for a slash command group.
These can be created manually, but they should be created via the
decorator or functional interface.
Attributes
-----------
name: :class:`str`
The name of the command.
description: Optional[:class:`str`]
The description for the command.
guild_ids: Optional[List[:class:`int`]]
The ids of the guilds where this command will be registered.
parent: Optional[:class:`SlashCommandGroup`]
The parent group that this group belongs to. ``None`` if there
isn't one.
subcommands: List[Union[:class:`SlashCommand`, :class:`SlashCommandGroup`]]
The list of all subcommands under this group.
cog: Optional[:class:`Cog`]
The cog that this command belongs to. ``None`` if there isn't one.
checks: List[Callable[[:class:`.ApplicationContext`], :class:`bool`]]
A list of predicates that verifies if the command could be executed
with the given :class:`.ApplicationContext` as the sole parameter. If an exception
is necessary to be thrown to signal failure, then one inherited from
:exc:`.CommandError` should be used. Note that if the checks fail then
:exc:`.CheckFailure` exception is raised to the :func:`.on_application_command_error`
event.
"""
type = 1
def __new__(cls, *args, **kwargs) -> SlashCommandGroup:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
def __init__(
self,
name: str,
description: str,
guild_ids: Optional[List[int]] = None,
parent: Optional[SlashCommandGroup] = None,
**kwargs,
) -> None:
validate_chat_input_name(name)
validate_chat_input_description(description)
super().__init__(
SlashCommandOptionType.sub_command_group,
name=name,
description=description,
)
self.subcommands: List[Union[SlashCommand, SlashCommandGroup]] = []
self.guild_ids = guild_ids
self.parent = parent
self.checks = []
self._before_invoke = None
self._after_invoke = None
self.cog = None
# Permissions
self.default_permission = kwargs.get("default_permission", True)
self.permissions: List[Permission] = kwargs.get("permissions", [])
if self.permissions and self.default_permission:
self.default_permission = False
def to_dict(self) -> Dict:
as_dict = {
"name": self.name,
"description": self.description,
"options": [c.to_dict() for c in self.subcommands],
}
if self.parent is not None:
as_dict["type"] = self.input_type.value
return as_dict
def command(self, **kwargs) -> SlashCommand:
def wrap(func) -> SlashCommand:
command = SlashCommand(func, parent=self, **kwargs)
self.subcommands.append(command)
return command
return wrap
def command_group(self, name, description) -> SlashCommandGroup:
if self.parent is not None:
# TODO: Improve this error message
raise Exception("Subcommands can only be nested once")
sub_command_group = SlashCommandGroup(name, description, parent=self)
self.subcommands.append(sub_command_group)
return sub_command_group
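    # Editor's note: hedged sketch of building a group with a nested
    # subcommand; attaching it to an assumed ``bot`` instance via
    # ``bot.add_application_command`` (a method from the wider library, not
    # shown in this file).
    #
    #     math = SlashCommandGroup("math", "Math related commands")
    #
    #     @math.command()
    #     async def add(ctx, a: int, b: int):
    #         await ctx.respond(str(a + b))
    #
    #     bot.add_application_command(math)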
async def _invoke(self, ctx: ApplicationContext) -> None:
option = ctx.interaction.data["options"][0]
command = find(lambda x: x.name == option["name"], self.subcommands)
ctx.interaction.data = option
await command.invoke(ctx)
async def invoke_autocomplete_callback(self, ctx: AutocompleteContext) -> None:
option = ctx.interaction.data["options"][0]
command = find(lambda x: x.name == option["name"], self.subcommands)
ctx.interaction.data = option
await command.invoke_autocomplete_callback(ctx)
class ContextMenuCommand(ApplicationCommand):
r"""A class that implements the protocol for context menu commands.
    These are not created manually; instead, they are created via the
    decorator or functional interface.
Attributes
-----------
name: :class:`str`
The name of the command.
callback: :ref:`coroutine <coroutine>`
The coroutine that is executed when the command is called.
guild_ids: Optional[List[:class:`int`]]
The ids of the guilds where this command will be registered.
cog: Optional[:class:`Cog`]
The cog that this command belongs to. ``None`` if there isn't one.
checks: List[Callable[[:class:`.ApplicationContext`], :class:`bool`]]
A list of predicates that verifies if the command could be executed
with the given :class:`.ApplicationContext` as the sole parameter. If an exception
is necessary to be thrown to signal failure, then one inherited from
:exc:`.CommandError` should be used. Note that if the checks fail then
:exc:`.CheckFailure` exception is raised to the :func:`.on_application_command_error`
event.
"""
def __new__(cls, *args, **kwargs) -> ContextMenuCommand:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
def __init__(self, func: Callable, *args, **kwargs) -> None:
if not asyncio.iscoroutinefunction(func):
raise TypeError("Callback must be a coroutine.")
self.callback = func
self.guild_ids: Optional[List[int]] = kwargs.get("guild_ids", None)
# Discord API doesn't support setting descriptions for context menu commands
# so it must be empty
self.description = ""
self.name: str = kwargs.pop("name", func.__name__)
if not isinstance(self.name, str):
raise TypeError("Name of a command must be a string.")
self.cog = None
try:
checks = func.__commands_checks__
checks.reverse()
except AttributeError:
checks = kwargs.get("checks", [])
self.checks = checks
self._before_invoke = None
self._after_invoke = None
self.validate_parameters()
# Context Menu commands don't have permissions
self.permissions = []
# Context Menu commands can't have parents
self.parent = None
def validate_parameters(self):
params = self._get_signature_parameters()
if list(params.items())[0][0] == "self":
temp = list(params.items())
temp.pop(0)
params = dict(temp)
params = iter(params)
# next we have the 'ctx' as the next parameter
try:
next(params)
except StopIteration:
raise ClientException(
f'Callback for {self.name} command is missing "ctx" parameter.'
)
# next we have the 'user/message' as the next parameter
try:
next(params)
except StopIteration:
cmd = "user" if type(self) == UserCommand else "message"
raise ClientException(
f'Callback for {self.name} command is missing "{cmd}" parameter.'
)
# next there should be no more parameters
try:
next(params)
raise ClientException(
f"Callback for {self.name} command has too many parameters."
)
except StopIteration:
pass
def qualified_name(self):
return self.name
def to_dict(self) -> Dict[str, Union[str, int]]:
return {"name": self.name, "description": self.description, "type": self.type}
class UserCommand(ContextMenuCommand):
type = 2
def __new__(cls, *args, **kwargs) -> UserCommand:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
async def _invoke(self, ctx: ApplicationContext) -> None:
if "members" not in ctx.interaction.data["resolved"]:
_data = ctx.interaction.data["resolved"]["users"]
for i, v in _data.items():
v["id"] = int(i)
user = v
target = User(state=ctx.interaction._state, data=user)
else:
_data = ctx.interaction.data["resolved"]["members"]
for i, v in _data.items():
v["id"] = int(i)
member = v
_data = ctx.interaction.data["resolved"]["users"]
for i, v in _data.items():
v["id"] = int(i)
user = v
member["user"] = user
target = Member(
data=member,
guild=ctx.interaction._state._get_guild(ctx.interaction.guild_id),
state=ctx.interaction._state,
)
if self.cog is not None:
await self.callback(self.cog, ctx, target)
else:
await self.callback(ctx, target)
def copy(self):
"""Creates a copy of this command.
Returns
--------
:class:`UserCommand`
A new instance of this command.
"""
ret = self.__class__(self.callback, **self.__original_kwargs__)
return self._ensure_assignment_on_copy(ret)
def _ensure_assignment_on_copy(self, other):
other._before_invoke = self._before_invoke
other._after_invoke = self._after_invoke
if self.checks != other.checks:
other.checks = self.checks.copy()
# if self._buckets.valid and not other._buckets.valid:
# other._buckets = self._buckets.copy()
# if self._max_concurrency != other._max_concurrency:
# # _max_concurrency won't be None at this point
# other._max_concurrency = self._max_concurrency.copy() # type: ignore
try:
other.on_error = self.on_error
except AttributeError:
pass
return other
def _update_copy(self, kwargs: Dict[str, Any]):
if kwargs:
kw = kwargs.copy()
kw.update(self.__original_kwargs__)
copy = self.__class__(self.callback, **kw)
return self._ensure_assignment_on_copy(copy)
else:
return self.copy()
class MessageCommand(ContextMenuCommand):
type = 3
def __new__(cls, *args, **kwargs) -> MessageCommand:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
async def _invoke(self, ctx: ApplicationContext):
_data = ctx.interaction.data["resolved"]["messages"]
for i, v in _data.items():
v["id"] = int(i)
message = v
channel = ctx.interaction._state.get_channel(int(message["channel_id"]))
if channel is None:
data = await ctx.interaction._state.http.start_private_message(
int(message["author"]["id"])
)
channel = ctx.interaction._state.add_dm_channel(data)
target = Message(state=ctx.interaction._state, channel=channel, data=message)
if self.cog is not None:
await self.callback(self.cog, ctx, target)
else:
await self.callback(ctx, target)
def copy(self):
"""Creates a copy of this command.
Returns
--------
:class:`MessageCommand`
A new instance of this command.
"""
ret = self.__class__(self.callback, **self.__original_kwargs__)
return self._ensure_assignment_on_copy(ret)
def _ensure_assignment_on_copy(self, other):
other._before_invoke = self._before_invoke
other._after_invoke = self._after_invoke
if self.checks != other.checks:
other.checks = self.checks.copy()
# if self._buckets.valid and not other._buckets.valid:
# other._buckets = self._buckets.copy()
# if self._max_concurrency != other._max_concurrency:
# # _max_concurrency won't be None at this point
# other._max_concurrency = self._max_concurrency.copy() # type: ignore
try:
other.on_error = self.on_error
except AttributeError:
pass
return other
def _update_copy(self, kwargs: Dict[str, Any]):
if kwargs:
kw = kwargs.copy()
kw.update(self.__original_kwargs__)
copy = self.__class__(self.callback, **kw)
return self._ensure_assignment_on_copy(copy)
else:
return self.copy()
def slash_command(**kwargs):
"""Decorator for slash commands that invokes :func:`application_command`.
.. versionadded:: 2.0
Returns
--------
Callable[..., :class:`SlashCommand`]
A decorator that converts the provided method into a :class:`.SlashCommand`.
"""
return application_command(cls=SlashCommand, **kwargs)
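# Editor's note: hedged sketch; ``slash_command`` only wraps
# ``application_command`` with ``cls=SlashCommand``, so the decorated coroutine
# becomes a SlashCommand instance that still has to be registered on a bot or
# cog (assumed here as ``bot.add_application_command``).
#
#     @slash_command(name="ping", guild_ids=[1234])
#     async def ping(ctx):
#         await ctx.respond("pong")
#
#     bot.add_application_command(ping)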
def user_command(**kwargs):
"""Decorator for user commands that invokes :func:`application_command`.
.. versionadded:: 2.0
Returns
--------
Callable[..., :class:`UserCommand`]
A decorator that converts the provided method into a :class:`.UserCommand`.
"""
return application_command(cls=UserCommand, **kwargs)
def message_command(**kwargs):
"""Decorator for message commands that invokes :func:`application_command`.
.. versionadded:: 2.0
Returns
--------
Callable[..., :class:`MessageCommand`]
A decorator that converts the provided method into a :class:`.MessageCommand`.
"""
return application_command(cls=MessageCommand, **kwargs)
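# Editor's note: hedged sketch contrasting the two context-menu factories
# above; ``bot`` is an illustrative assumption. The second callback parameter
# is the right-clicked target rather than a set of options.
#
#     @bot.user_command(name="Account age")
#     async def account_age(ctx, member):
#         await ctx.respond(f"{member.name} joined Discord at {member.created_at}")
#
#     @bot.message_command(name="Quote")
#     async def quote(ctx, message):
#         await ctx.respond(message.content)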
def application_command(cls=SlashCommand, **attrs):
"""A decorator that transforms a function into an :class:`.ApplicationCommand`. More specifically,
usually one of :class:`.SlashCommand`, :class:`.UserCommand`, or :class:`.MessageCommand`. The exact class
depends on the ``cls`` parameter.
By default the ``description`` attribute is received automatically from the
docstring of the function and is cleaned up with the use of
``inspect.cleandoc``. If the docstring is ``bytes``, then it is decoded
into :class:`str` using utf-8 encoding.
The ``name`` attribute also defaults to the function name unchanged.
.. versionadded:: 2.0
Parameters
-----------
cls: :class:`.ApplicationCommand`
The class to construct with. By default this is :class:`.SlashCommand`.
You usually do not change this.
attrs
Keyword arguments to pass into the construction of the class denoted
by ``cls``.
Raises
-------
TypeError
If the function is not a coroutine or is already a command.
"""
def decorator(func: Callable) -> cls:
if isinstance(func, ApplicationCommand):
func = func.callback
elif not callable(func):
raise TypeError(
"func needs to be a callable or a subclass of ApplicationCommand."
)
return cls(func, **attrs)
return decorator
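# Editor's note: hedged sketch showing direct use of ``application_command``
# with an explicit ``cls``; equivalent to the ``message_command`` shortcut
# above. ``bot`` and the command name are illustrative assumptions.
#
#     @application_command(cls=MessageCommand, name="Pin this")
#     async def pin_this(ctx, message):
#         await message.pin()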
def command(**kwargs):
"""There is an alias for :meth:`application_command`.
.. note::
This decorator is overridden by :func:`commands.command`.
.. versionadded:: 2.0
Returns
--------
Callable[..., :class:`ApplicationCommand`]
A decorator that converts the provided method into an :class:`.ApplicationCommand`.
"""
return application_command(**kwargs)
# Validation
def validate_chat_input_name(name: Any):
if not isinstance(name, str):
raise TypeError("Name of a command must be a string.")
if " " in name:
raise ValidationError("Name of a chat input command cannot have spaces.")
if not name.islower():
raise ValidationError("Name of a chat input command must be lowercase.")
if len(name) > 32 or len(name) < 1:
raise ValidationError(
"Name of a chat input command must be less than 32 characters and non empty."
)
def validate_chat_input_description(description: Any):
if not isinstance(description, str):
raise TypeError("Description of a command must be a string.")
if len(description) > 100 or len(description) < 1:
raise ValidationError(
"Description of a chat input command must be less than 100 characters and non empty."
)
| 35.381033
| 110
| 0.612229
|
from __future__ import annotations
import asyncio
import types
import functools
import inspect
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Union, TYPE_CHECKING
from ..enums import SlashCommandOptionType, ChannelType
from ..member import Member
from ..user import User
from ..message import Message
from .context import ApplicationContext, AutocompleteContext
from ..utils import find, get_or_fetch, async_all
from ..errors import ValidationError, ClientException
from .errors import ApplicationCommandError, CheckFailure, ApplicationCommandInvokeError
from .permissions import Permission
__all__ = (
"_BaseCommand",
"ApplicationCommand",
"SlashCommand",
"Option",
"OptionChoice",
"option",
"slash_command",
"application_command",
"user_command",
"message_command",
"command",
"SlashCommandGroup",
"ContextMenuCommand",
"UserCommand",
"MessageCommand",
)
if TYPE_CHECKING:
from ..interactions import Interaction
def wrap_callback(coro):
@functools.wraps(coro)
async def wrapped(*args, **kwargs):
try:
ret = await coro(*args, **kwargs)
except ApplicationCommandError:
raise
except asyncio.CancelledError:
return
except Exception as exc:
raise ApplicationCommandInvokeError(exc) from exc
return ret
return wrapped
def hooked_wrapped_callback(command, ctx, coro):
@functools.wraps(coro)
async def wrapped(arg):
try:
ret = await coro(arg)
except ApplicationCommandError:
raise
except asyncio.CancelledError:
return
except Exception as exc:
raise ApplicationCommandInvokeError(exc) from exc
finally:
await command.call_after_hooks(ctx)
return ret
return wrapped
class _BaseCommand:
__slots__ = ()
class ApplicationCommand(_BaseCommand):
cog = None
def __repr__(self):
return f"<discord.commands.{self.__class__.__name__} name={self.name}>"
def __eq__(self, other):
return isinstance(other, self.__class__)
async def __call__(self, ctx, *args, **kwargs):
return await self.callback(ctx, *args, **kwargs)
async def prepare(self, ctx: ApplicationContext) -> None:
ctx.command = self
if not await self.can_run(ctx):
raise CheckFailure(
f"The check functions for the command {self.name} failed"
)
await self.call_before_hooks(ctx)
async def invoke(self, ctx: ApplicationContext) -> None:
await self.prepare(ctx)
injected = hooked_wrapped_callback(self, ctx, self._invoke)
await injected(ctx)
async def can_run(self, ctx: ApplicationContext) -> bool:
if not await ctx.bot.can_run(ctx):
raise CheckFailure(
f"The global check functions for command {self.name} failed."
)
predicates = self.checks
if not predicates:
return True
return await async_all(predicate(ctx) for predicate in predicates)
async def dispatch_error(self, ctx: ApplicationContext, error: Exception) -> None:
ctx.command_failed = True
cog = self.cog
try:
coro = self.on_error
except AttributeError:
pass
else:
injected = wrap_callback(coro)
if cog is not None:
await injected(cog, ctx, error)
else:
await injected(ctx, error)
try:
if cog is not None:
local = cog.__class__._get_overridden_method(cog.cog_command_error)
if local is not None:
wrapped = wrap_callback(local)
await wrapped(ctx, error)
finally:
ctx.bot.dispatch("application_command_error", ctx, error)
def _get_signature_parameters(self):
return OrderedDict(inspect.signature(self.callback).parameters)
def error(self, coro):
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The error handler must be a coroutine.")
self.on_error = coro
return coro
def has_error_handler(self) -> bool:
return hasattr(self, "on_error")
def before_invoke(self, coro):
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The pre-invoke hook must be a coroutine.")
self._before_invoke = coro
return coro
def after_invoke(self, coro):
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The post-invoke hook must be a coroutine.")
self._after_invoke = coro
return coro
async def call_before_hooks(self, ctx: ApplicationContext) -> None:
# first, call the command local hook:
cog = self.cog
if self._before_invoke is not None:
# should be cog if @commands.before_invoke is used
instance = getattr(self._before_invoke, "__self__", cog)
# __self__ only exists for methods, not functions
# however, if @command.before_invoke is used, it will be a function
if instance:
await self._before_invoke(instance, ctx) # type: ignore
else:
await self._before_invoke(ctx) # type: ignore
# call the cog local hook if applicable:
if cog is not None:
hook = cog.__class__._get_overridden_method(cog.cog_before_invoke)
if hook is not None:
await hook(ctx)
# call the bot global hook if necessary
hook = ctx.bot._before_invoke
if hook is not None:
await hook(ctx)
async def call_after_hooks(self, ctx: ApplicationContext) -> None:
cog = self.cog
if self._after_invoke is not None:
instance = getattr(self._after_invoke, "__self__", cog)
if instance:
await self._after_invoke(instance, ctx) # type: ignore
else:
await self._after_invoke(ctx) # type: ignore
# call the cog local hook if applicable:
if cog is not None:
hook = cog.__class__._get_overridden_method(cog.cog_after_invoke)
if hook is not None:
await hook(ctx)
hook = ctx.bot._after_invoke
if hook is not None:
await hook(ctx)
@property
def full_parent_name(self) -> str:
entries = []
command = self
while command.parent is not None and hasattr(command.parent, "name"):
command = command.parent
entries.append(command.name)
return " ".join(reversed(entries))
def qualified_name(self) -> str:
parent = self.full_parent_name
if parent:
return parent + " " + self.name
else:
return self.name
class SlashCommand(ApplicationCommand):
type = 1
def __new__(cls, *args, **kwargs) -> SlashCommand:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
def __init__(self, func: Callable, *args, **kwargs) -> None:
if not asyncio.iscoroutinefunction(func):
raise TypeError("Callback must be a coroutine.")
self.callback = func
self.guild_ids: Optional[List[int]] = kwargs.get("guild_ids", None)
name = kwargs.get("name") or func.__name__
validate_chat_input_name(name)
self.name: str = name
self.id = None
description = kwargs.get("description") or (
inspect.cleandoc(func.__doc__).splitlines()[0]
if func.__doc__ is not None
else "No description provided"
)
validate_chat_input_description(description)
self.description: str = description
self.parent = kwargs.get("parent")
self.is_subcommand: bool = self.parent is not None
self.cog = None
params = self._get_signature_parameters()
self.options: List[Option] = kwargs.get("options") or self._parse_options(
params
)
try:
checks = func.__commands_checks__
checks.reverse()
except AttributeError:
checks = kwargs.get("checks", [])
self.checks = checks
self._before_invoke = None
self._after_invoke = None
# Permissions
self.default_permission = kwargs.get("default_permission", True)
self.permissions: List[Permission] = getattr(
func, "__app_cmd_perms__", []
) + kwargs.get("permissions", [])
if self.permissions and self.default_permission:
self.default_permission = False
def _parse_options(self, params) -> List[Option]:
final_options = []
if list(params.items())[0][0] == "self":
temp = list(params.items())
temp.pop(0)
params = dict(temp)
params = iter(params.items())
# next we have the 'ctx' as the next parameter
try:
next(params)
except StopIteration:
raise ClientException(
f'Callback for {self.name} command is missing "ctx" parameter.'
)
final_options = []
for p_name, p_obj in params:
option = p_obj.annotation
if option == inspect.Parameter.empty:
option = str
if self._is_typing_union(option):
if self._is_typing_optional(option):
option = Option(
option.__args__[0], "No description provided", required=False
)
else:
option = Option(option.__args__, "No description provided")
if not isinstance(option, Option):
option = Option(option, "No description provided")
if p_obj.default != inspect.Parameter.empty:
option.required = False
option.default = (
option.default if option.default is not None else p_obj.default
)
if option.default == inspect.Parameter.empty:
option.default = None
if option.name is None:
option.name = p_name
option._parameter_name = p_name
final_options.append(option)
return final_options
def _is_typing_union(self, annotation):
return getattr(annotation, "__origin__", None) is Union or type(
annotation
) is getattr(
types, "UnionType", Union
) # type: ignore
def _is_typing_optional(self, annotation):
return self._is_typing_union(annotation) and type(None) in annotation.__args__ # type: ignore
def to_dict(self) -> Dict:
as_dict = {
"name": self.name,
"description": self.description,
"options": [o.to_dict() for o in self.options],
"default_permission": self.default_permission,
}
if self.is_subcommand:
as_dict["type"] = SlashCommandOptionType.sub_command.value
return as_dict
def __eq__(self, other) -> bool:
return (
isinstance(other, SlashCommand)
and other.name == self.name
and other.description == self.description
)
async def _invoke(self, ctx: ApplicationContext) -> None:
# TODO: Parse the args better
kwargs = {}
for arg in ctx.interaction.data.get("options", []):
op = find(lambda x: x.name == arg["name"], self.options)
arg = arg["value"]
# Checks if input_type is user, role or channel
if (
SlashCommandOptionType.user.value
<= op.input_type.value
<= SlashCommandOptionType.role.value
):
name = "member" if op.input_type.name == "user" else op.input_type.name
arg = await get_or_fetch(ctx.guild, name, int(arg), default=int(arg))
elif op.input_type == SlashCommandOptionType.mentionable:
arg_id = int(arg)
arg = await get_or_fetch(ctx.guild, "member", arg_id)
if arg is None:
arg = ctx.guild.get_role(arg_id) or arg_id
elif (
op.input_type == SlashCommandOptionType.string
and op._converter is not None
):
arg = await op._converter.convert(ctx, arg)
kwargs[op._parameter_name] = arg
for o in self.options:
if o._parameter_name not in kwargs:
kwargs[o._parameter_name] = o.default
if self.cog is not None:
await self.callback(self.cog, ctx, **kwargs)
else:
await self.callback(ctx, **kwargs)
async def invoke_autocomplete_callback(self, ctx: AutocompleteContext):
values = {i.name: i.default for i in self.options}
for op in ctx.interaction.data.get("options", []):
if op.get("focused", False):
option = find(lambda o: o.name == op["name"], self.options)
values.update(
{i["name"]: i["value"] for i in ctx.interaction.data["options"]}
)
ctx.command = self
ctx.focused = option
ctx.value = op.get("value")
ctx.options = values
if len(inspect.signature(option.autocomplete).parameters) == 2:
instance = getattr(option.autocomplete, "__self__", ctx.cog)
result = option.autocomplete(instance, ctx)
else:
result = option.autocomplete(ctx)
if asyncio.iscoroutinefunction(option.autocomplete):
result = await result
choices = [
o if isinstance(o, OptionChoice) else OptionChoice(o)
for o in result
][:25]
return await ctx.interaction.response.send_autocomplete_result(
choices=choices
)
def copy(self):
ret = self.__class__(self.callback, **self.__original_kwargs__)
return self._ensure_assignment_on_copy(ret)
def _ensure_assignment_on_copy(self, other):
other._before_invoke = self._before_invoke
other._after_invoke = self._after_invoke
if self.checks != other.checks:
other.checks = self.checks.copy()
# if self._buckets.valid and not other._buckets.valid:
# other._buckets = self._buckets.copy()
# if self._max_concurrency != other._max_concurrency:
# # _max_concurrency won't be None at this point
        try:
            other.on_error = self.on_error
except AttributeError:
pass
return other
def _update_copy(self, kwargs: Dict[str, Any]):
if kwargs:
kw = kwargs.copy()
kw.update(self.__original_kwargs__)
copy = self.__class__(self.callback, **kw)
return self._ensure_assignment_on_copy(copy)
else:
return self.copy()
channel_type_map = {
"TextChannel": ChannelType.text,
"VoiceChannel": ChannelType.voice,
"StageChannel": ChannelType.stage_voice,
"CategoryChannel": ChannelType.category,
}
class Option:
def __init__(self, input_type: Any, /, description: str = None, **kwargs) -> None:
self.name: Optional[str] = kwargs.pop("name", None)
self.description = description or "No description provided"
self._converter = None
self.channel_types: List[SlashCommandOptionType] = kwargs.pop(
"channel_types", []
)
if not isinstance(input_type, SlashCommandOptionType):
if hasattr(input_type, "convert"):
self._converter = input_type
input_type = SlashCommandOptionType.string
else:
_type = SlashCommandOptionType.from_datatype(input_type)
if _type == SlashCommandOptionType.channel:
if not isinstance(input_type, tuple):
input_type = (input_type,)
for i in input_type:
if i.__name__ == "GuildChannel":
continue
channel_type = channel_type_map[i.__name__]
self.channel_types.append(channel_type)
input_type = _type
self.input_type = input_type
self.required: bool = kwargs.pop("required", True)
self.choices: List[OptionChoice] = [
o if isinstance(o, OptionChoice) else OptionChoice(o)
for o in kwargs.pop("choices", list())
]
self.default = kwargs.pop("default", None)
if self.input_type == SlashCommandOptionType.integer:
minmax_types = (int, type(None))
elif self.input_type == SlashCommandOptionType.number:
minmax_types = (int, float, type(None))
else:
minmax_types = (type(None),)
minmax_typehint = Optional[Union[minmax_types]]
self.min_value: minmax_typehint = kwargs.pop("min_value", None)
self.max_value: minmax_typehint = kwargs.pop("max_value", None)
if not (isinstance(self.min_value, minmax_types) or self.min_value is None):
raise TypeError(
f'Expected {minmax_typehint} for min_value, got "{type(self.min_value).__name__}"'
)
        if not (isinstance(self.max_value, minmax_types) or self.max_value is None):
raise TypeError(
f'Expected {minmax_typehint} for max_value, got "{type(self.max_value).__name__}"'
)
self.autocomplete = kwargs.pop("autocomplete", None)
def to_dict(self) -> Dict:
as_dict = {
"name": self.name,
"description": self.description,
"type": self.input_type.value,
"required": self.required,
"choices": [c.to_dict() for c in self.choices],
"autocomplete": bool(self.autocomplete),
}
if self.channel_types:
as_dict["channel_types"] = [t.value for t in self.channel_types]
if self.min_value is not None:
as_dict["min_value"] = self.min_value
if self.max_value is not None:
as_dict["max_value"] = self.max_value
return as_dict
def __repr__(self):
return f"<discord.commands.{self.__class__.__name__} name={self.name}>"
class OptionChoice:
def __init__(self, name: str, value: Optional[Union[str, int, float]] = None):
self.name = name
self.value = value or name
def to_dict(self) -> Dict[str, Union[str, int, float]]:
return {"name": self.name, "value": self.value}
def option(name, type=None, **kwargs):
def decor(func):
nonlocal type
type = type or func.__annotations__.get(name, str)
func.__annotations__[name] = Option(type, **kwargs)
return func
return decor
class SlashCommandGroup(ApplicationCommand, Option):
type = 1
def __new__(cls, *args, **kwargs) -> SlashCommandGroup:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
def __init__(
self,
name: str,
description: str,
guild_ids: Optional[List[int]] = None,
parent: Optional[SlashCommandGroup] = None,
**kwargs,
) -> None:
validate_chat_input_name(name)
validate_chat_input_description(description)
super().__init__(
SlashCommandOptionType.sub_command_group,
name=name,
description=description,
)
self.subcommands: List[Union[SlashCommand, SlashCommandGroup]] = []
self.guild_ids = guild_ids
self.parent = parent
self.checks = []
self._before_invoke = None
self._after_invoke = None
self.cog = None
self.default_permission = kwargs.get("default_permission", True)
self.permissions: List[Permission] = kwargs.get("permissions", [])
if self.permissions and self.default_permission:
self.default_permission = False
def to_dict(self) -> Dict:
as_dict = {
"name": self.name,
"description": self.description,
"options": [c.to_dict() for c in self.subcommands],
}
if self.parent is not None:
as_dict["type"] = self.input_type.value
return as_dict
def command(self, **kwargs) -> SlashCommand:
def wrap(func) -> SlashCommand:
command = SlashCommand(func, parent=self, **kwargs)
self.subcommands.append(command)
return command
return wrap
def command_group(self, name, description) -> SlashCommandGroup:
if self.parent is not None:
raise Exception("Subcommands can only be nested once")
sub_command_group = SlashCommandGroup(name, description, parent=self)
self.subcommands.append(sub_command_group)
return sub_command_group
async def _invoke(self, ctx: ApplicationContext) -> None:
option = ctx.interaction.data["options"][0]
command = find(lambda x: x.name == option["name"], self.subcommands)
ctx.interaction.data = option
await command.invoke(ctx)
async def invoke_autocomplete_callback(self, ctx: AutocompleteContext) -> None:
option = ctx.interaction.data["options"][0]
command = find(lambda x: x.name == option["name"], self.subcommands)
ctx.interaction.data = option
await command.invoke_autocomplete_callback(ctx)
class ContextMenuCommand(ApplicationCommand):
def __new__(cls, *args, **kwargs) -> ContextMenuCommand:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
def __init__(self, func: Callable, *args, **kwargs) -> None:
if not asyncio.iscoroutinefunction(func):
raise TypeError("Callback must be a coroutine.")
self.callback = func
self.guild_ids: Optional[List[int]] = kwargs.get("guild_ids", None)
        # Discord API doesn't support descriptions for context menu commands, so it must be empty
self.description = ""
self.name: str = kwargs.pop("name", func.__name__)
if not isinstance(self.name, str):
raise TypeError("Name of a command must be a string.")
self.cog = None
try:
checks = func.__commands_checks__
checks.reverse()
except AttributeError:
checks = kwargs.get("checks", [])
self.checks = checks
self._before_invoke = None
self._after_invoke = None
self.validate_parameters()
# Context Menu commands don't have permissions
self.permissions = []
self.parent = None
def validate_parameters(self):
params = self._get_signature_parameters()
if list(params.items())[0][0] == "self":
temp = list(params.items())
temp.pop(0)
params = dict(temp)
params = iter(params)
# next we have the 'ctx' as the next parameter
try:
next(params)
except StopIteration:
raise ClientException(
f'Callback for {self.name} command is missing "ctx" parameter.'
)
# next we have the 'user/message' as the next parameter
try:
next(params)
except StopIteration:
cmd = "user" if type(self) == UserCommand else "message"
raise ClientException(
f'Callback for {self.name} command is missing "{cmd}" parameter.'
)
# next there should be no more parameters
try:
next(params)
raise ClientException(
f"Callback for {self.name} command has too many parameters."
)
except StopIteration:
pass
def qualified_name(self):
return self.name
def to_dict(self) -> Dict[str, Union[str, int]]:
return {"name": self.name, "description": self.description, "type": self.type}
class UserCommand(ContextMenuCommand):
type = 2
def __new__(cls, *args, **kwargs) -> UserCommand:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
async def _invoke(self, ctx: ApplicationContext) -> None:
if "members" not in ctx.interaction.data["resolved"]:
_data = ctx.interaction.data["resolved"]["users"]
for i, v in _data.items():
v["id"] = int(i)
user = v
target = User(state=ctx.interaction._state, data=user)
else:
_data = ctx.interaction.data["resolved"]["members"]
for i, v in _data.items():
v["id"] = int(i)
member = v
_data = ctx.interaction.data["resolved"]["users"]
for i, v in _data.items():
v["id"] = int(i)
user = v
member["user"] = user
target = Member(
data=member,
guild=ctx.interaction._state._get_guild(ctx.interaction.guild_id),
state=ctx.interaction._state,
)
if self.cog is not None:
await self.callback(self.cog, ctx, target)
else:
await self.callback(ctx, target)
def copy(self):
ret = self.__class__(self.callback, **self.__original_kwargs__)
return self._ensure_assignment_on_copy(ret)
def _ensure_assignment_on_copy(self, other):
other._before_invoke = self._before_invoke
other._after_invoke = self._after_invoke
if self.checks != other.checks:
other.checks = self.checks.copy()
# if self._buckets.valid and not other._buckets.valid:
# other._buckets = self._buckets.copy()
# if self._max_concurrency != other._max_concurrency:
# # _max_concurrency won't be None at this point
        try:
            other.on_error = self.on_error
except AttributeError:
pass
return other
def _update_copy(self, kwargs: Dict[str, Any]):
if kwargs:
kw = kwargs.copy()
kw.update(self.__original_kwargs__)
copy = self.__class__(self.callback, **kw)
return self._ensure_assignment_on_copy(copy)
else:
return self.copy()
class MessageCommand(ContextMenuCommand):
type = 3
def __new__(cls, *args, **kwargs) -> MessageCommand:
self = super().__new__(cls)
self.__original_kwargs__ = kwargs.copy()
return self
async def _invoke(self, ctx: ApplicationContext):
_data = ctx.interaction.data["resolved"]["messages"]
for i, v in _data.items():
v["id"] = int(i)
message = v
channel = ctx.interaction._state.get_channel(int(message["channel_id"]))
if channel is None:
data = await ctx.interaction._state.http.start_private_message(
int(message["author"]["id"])
)
channel = ctx.interaction._state.add_dm_channel(data)
target = Message(state=ctx.interaction._state, channel=channel, data=message)
if self.cog is not None:
await self.callback(self.cog, ctx, target)
else:
await self.callback(ctx, target)
def copy(self):
ret = self.__class__(self.callback, **self.__original_kwargs__)
return self._ensure_assignment_on_copy(ret)
def _ensure_assignment_on_copy(self, other):
other._before_invoke = self._before_invoke
other._after_invoke = self._after_invoke
if self.checks != other.checks:
other.checks = self.checks.copy()
try:
other.on_error = self.on_error
except AttributeError:
pass
return other
def _update_copy(self, kwargs: Dict[str, Any]):
if kwargs:
kw = kwargs.copy()
kw.update(self.__original_kwargs__)
copy = self.__class__(self.callback, **kw)
return self._ensure_assignment_on_copy(copy)
else:
return self.copy()
def slash_command(**kwargs):
return application_command(cls=SlashCommand, **kwargs)
def user_command(**kwargs):
return application_command(cls=UserCommand, **kwargs)
def message_command(**kwargs):
return application_command(cls=MessageCommand, **kwargs)
def application_command(cls=SlashCommand, **attrs):
def decorator(func: Callable) -> cls:
if isinstance(func, ApplicationCommand):
func = func.callback
elif not callable(func):
raise TypeError(
"func needs to be a callable or a subclass of ApplicationCommand."
)
return cls(func, **attrs)
return decorator
def command(**kwargs):
return application_command(**kwargs)
# Validation
def validate_chat_input_name(name: Any):
if not isinstance(name, str):
raise TypeError("Name of a command must be a string.")
if " " in name:
raise ValidationError("Name of a chat input command cannot have spaces.")
if not name.islower():
raise ValidationError("Name of a chat input command must be lowercase.")
if len(name) > 32 or len(name) < 1:
raise ValidationError(
"Name of a chat input command must be less than 32 characters and non empty."
)
def validate_chat_input_description(description: Any):
if not isinstance(description, str):
raise TypeError("Description of a command must be a string.")
if len(description) > 100 or len(description) < 1:
raise ValidationError(
"Description of a chat input command must be less than 100 characters and non empty."
)
| true
| true
|
f70566c8c49314aaaa034de3b4b4298fd10bc138
| 95,527
|
py
|
Python
|
test/orm/test_deprecations.py
|
edelooff/sqlalchemy
|
97d2a2091ed4caee1e19168d0db39e4d94a6d12f
|
[
"MIT"
] | 1
|
2019-09-27T15:40:23.000Z
|
2019-09-27T15:40:23.000Z
|
test/orm/test_deprecations.py
|
KonstantinKlepikov/sqlalchemy-1
|
2c34d2503a17316cae3282192405b9b9d60df6fe
|
[
"MIT"
] | null | null | null |
test/orm/test_deprecations.py
|
KonstantinKlepikov/sqlalchemy-1
|
2c34d2503a17316cae3282192405b9b9d60df6fe
|
[
"MIT"
] | 1
|
2019-08-27T09:47:08.000Z
|
2019-08-27T09:47:08.000Z
|
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy.ext.declarative import comparable_using
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import AttributeExtension
from sqlalchemy.orm import attributes
from sqlalchemy.orm import collections
from sqlalchemy.orm import column_property
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm import composite
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import create_session
from sqlalchemy.orm import defer
from sqlalchemy.orm import deferred
from sqlalchemy.orm import EXT_CONTINUE
from sqlalchemy.orm import identity
from sqlalchemy.orm import instrumentation
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import mapper
from sqlalchemy.orm import MapperExtension
from sqlalchemy.orm import PropComparator
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import SessionExtension
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import synonym
from sqlalchemy.orm import undefer
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.orm.collections import collection
from sqlalchemy.orm.util import polymorphic_union
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assertions
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_true
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.util.compat import pypy
from . import _fixtures
from .inheritance import _poly_fixtures
from .test_options import PathTest as OptionsPathTest
from .test_transaction import _LocalFixture
class DeprecationWarningsTest(fixtures.DeclarativeMappedTest):
run_setup_classes = "each"
run_setup_mappers = "each"
run_define_tables = "each"
run_create_tables = None
def test_attribute_extension(self):
class SomeExtension(AttributeExtension):
def append(self, obj, value, initiator):
pass
def remove(self, obj, value, initiator):
pass
def set(self, obj, value, oldvalue, initiator):
pass
with assertions.expect_deprecated(
".*The column_property.extension parameter will be removed in a "
"future release."
):
class Foo(self.DeclarativeBasic):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
foo = column_property(
Column("q", Integer), extension=SomeExtension()
)
with assertions.expect_deprecated(
"AttributeExtension.append is deprecated. The "
"AttributeExtension class will be removed in a future release.",
"AttributeExtension.remove is deprecated. The "
"AttributeExtension class will be removed in a future release.",
"AttributeExtension.set is deprecated. The "
"AttributeExtension class will be removed in a future release.",
):
configure_mappers()
def test_attribute_extension_parameter(self):
class SomeExtension(AttributeExtension):
def append(self, obj, value, initiator):
pass
with assertions.expect_deprecated(
".*The relationship.extension parameter will be removed in a "
"future release."
):
relationship("Bar", extension=SomeExtension)
with assertions.expect_deprecated(
".*The column_property.extension parameter will be removed in a "
"future release."
):
column_property(Column("q", Integer), extension=SomeExtension)
with assertions.expect_deprecated(
".*The composite.extension parameter will be removed in a "
"future release."
):
composite("foo", extension=SomeExtension)
def test_session_extension(self):
class SomeExtension(SessionExtension):
def after_commit(self, session):
pass
def after_rollback(self, session):
pass
def before_flush(self, session, flush_context, instances):
pass
with assertions.expect_deprecated(
".*The Session.extension parameter will be removed",
"SessionExtension.after_commit is deprecated. "
"The SessionExtension class",
"SessionExtension.before_flush is deprecated. "
"The SessionExtension class",
"SessionExtension.after_rollback is deprecated. "
"The SessionExtension class",
):
Session(extension=SomeExtension())
def test_mapper_extension(self):
class SomeExtension(MapperExtension):
def init_instance(
self, mapper, class_, oldinit, instance, args, kwargs
):
pass
def init_failed(
self, mapper, class_, oldinit, instance, args, kwargs
):
pass
with assertions.expect_deprecated(
"MapperExtension.init_instance is deprecated. "
"The MapperExtension class",
"MapperExtension.init_failed is deprecated. "
"The MapperExtension class",
".*The mapper.extension parameter will be removed",
):
class Foo(self.DeclarativeBasic):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
__mapper_args__ = {"extension": SomeExtension()}
def test_session_weak_identity_map(self):
with testing.expect_deprecated(
".*Session.weak_identity_map parameter as well as the"
):
s = Session(weak_identity_map=True)
is_(s._identity_cls, identity.WeakInstanceDict)
with assertions.expect_deprecated(
"The Session.weak_identity_map parameter as well as"
):
s = Session(weak_identity_map=False)
is_(s._identity_cls, identity.StrongInstanceDict)
s = Session()
is_(s._identity_cls, identity.WeakInstanceDict)
def test_session_prune(self):
s = Session()
with assertions.expect_deprecated(
r"The Session.prune\(\) method is deprecated along with "
"Session.weak_identity_map"
):
s.prune()
def test_session_enable_transaction_accounting(self):
with assertions.expect_deprecated(
"the Session._enable_transaction_accounting parameter is "
"deprecated"
):
Session(_enable_transaction_accounting=False)
def test_session_is_modified(self):
class Foo(self.DeclarativeBasic):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
f1 = Foo()
s = Session()
with assertions.expect_deprecated(
"The Session.is_modified.passive flag is deprecated"
):
# this flag was for a long time documented as requiring
# that it be set to True, so we've changed the default here
# so that the warning emits
s.is_modified(f1, passive=True)
class DeprecatedAccountingFlagsTest(_LocalFixture):
def test_rollback_no_accounting(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The Session._enable_transaction_accounting parameter"
):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name="ed")
sess.add(u1)
sess.commit()
u1.name = "edwardo"
sess.rollback()
testing.db.execute(
users.update(users.c.name == "ed").values(name="edward")
)
assert u1.name == "edwardo"
sess.expire_all()
assert u1.name == "edward"
def test_commit_no_accounting(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The Session._enable_transaction_accounting parameter"
):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name="ed")
sess.add(u1)
sess.commit()
u1.name = "edwardo"
sess.rollback()
testing.db.execute(
users.update(users.c.name == "ed").values(name="edward")
)
assert u1.name == "edwardo"
sess.commit()
assert testing.db.execute(select([users.c.name])).fetchall() == [
("edwardo",)
]
assert u1.name == "edwardo"
sess.delete(u1)
sess.commit()
def test_preflush_no_accounting(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The Session._enable_transaction_accounting parameter"
):
sess = Session(
_enable_transaction_accounting=False,
autocommit=True,
autoflush=False,
)
u1 = User(name="ed")
sess.add(u1)
sess.flush()
sess.begin()
u1.name = "edwardo"
u2 = User(name="some other user")
sess.add(u2)
sess.rollback()
sess.begin()
assert testing.db.execute(select([users.c.name])).fetchall() == [
("ed",)
]
class DeprecatedSessionFeatureTest(_fixtures.FixtureTest):
run_inserts = None
def test_fast_discard_race(self):
# test issue #4068
users, User = self.tables.users, self.classes.User
mapper(User, users)
with testing.expect_deprecated(".*identity map are deprecated"):
sess = Session(weak_identity_map=False)
u1 = User(name="u1")
sess.add(u1)
sess.commit()
u1_state = u1._sa_instance_state
sess.identity_map._dict.pop(u1_state.key)
ref = u1_state.obj
u1_state.obj = lambda: None
u2 = sess.query(User).first()
u1_state._cleanup(ref)
u3 = sess.query(User).first()
is_(u2, u3)
u2_state = u2._sa_instance_state
assert sess.identity_map.contains_state(u2._sa_instance_state)
ref = u2_state.obj
u2_state.obj = lambda: None
u2_state._cleanup(ref)
assert not sess.identity_map.contains_state(u2._sa_instance_state)
def test_is_modified_passive_on(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
s = Session()
u = User(name="fred", addresses=[Address(email_address="foo")])
s.add(u)
s.commit()
u.id
def go():
assert not s.is_modified(u, passive=True)
with testing.expect_deprecated(
".*Session.is_modified.passive flag is deprecated "
):
self.assert_sql_count(testing.db, go, 0)
u.name = "newname"
def go():
assert s.is_modified(u, passive=True)
with testing.expect_deprecated(
".*Session.is_modified.passive flag is deprecated "
):
self.assert_sql_count(testing.db, go, 0)
class StrongIdentityMapTest(_fixtures.FixtureTest):
run_inserts = None
def _strong_ident_fixture(self):
with testing.expect_deprecated(
".*Session.weak_identity_map parameter as well as the"
):
sess = create_session(weak_identity_map=False)
def prune():
with testing.expect_deprecated(".*Session.prune"):
return sess.prune()
return sess, prune
def _event_fixture(self):
session = create_session()
@event.listens_for(session, "pending_to_persistent")
@event.listens_for(session, "deleted_to_persistent")
@event.listens_for(session, "detached_to_persistent")
@event.listens_for(session, "loaded_as_persistent")
def strong_ref_object(sess, instance):
if "refs" not in sess.info:
sess.info["refs"] = refs = set()
else:
refs = sess.info["refs"]
refs.add(instance)
@event.listens_for(session, "persistent_to_detached")
@event.listens_for(session, "persistent_to_deleted")
@event.listens_for(session, "persistent_to_transient")
def deref_object(sess, instance):
sess.info["refs"].discard(instance)
def prune():
if "refs" not in session.info:
return 0
sess_size = len(session.identity_map)
session.info["refs"].clear()
gc_collect()
session.info["refs"] = set(
s.obj() for s in session.identity_map.all_states()
)
return sess_size - len(session.identity_map)
return session, prune
def test_strong_ref_imap(self):
self._test_strong_ref(self._strong_ident_fixture)
def test_strong_ref_events(self):
self._test_strong_ref(self._event_fixture)
def _test_strong_ref(self, fixture):
s, prune = fixture()
users, User = self.tables.users, self.classes.User
mapper(User, users)
# save user
s.add(User(name="u1"))
s.flush()
user = s.query(User).one()
user = None
print(s.identity_map)
gc_collect()
assert len(s.identity_map) == 1
user = s.query(User).one()
assert not s.identity_map._modified
user.name = "u2"
assert s.identity_map._modified
s.flush()
eq_(users.select().execute().fetchall(), [(user.id, "u2")])
def test_prune_imap(self):
self._test_prune(self._strong_ident_fixture)
def test_prune_events(self):
self._test_prune(self._event_fixture)
@testing.fails_if(lambda: pypy, "pypy has a real GC")
@testing.fails_on("+zxjdbc", "http://www.sqlalchemy.org/trac/ticket/1473")
def _test_prune(self, fixture):
s, prune = fixture()
users, User = self.tables.users, self.classes.User
mapper(User, users)
for o in [User(name="u%s" % x) for x in range(10)]:
s.add(o)
# o is still live after this loop...
self.assert_(len(s.identity_map) == 0)
eq_(prune(), 0)
s.flush()
gc_collect()
eq_(prune(), 9)
# o is still in local scope here, so still present
self.assert_(len(s.identity_map) == 1)
id_ = o.id
del o
eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
u = s.query(User).get(id_)
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
u.name = "squiznart"
del u
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
s.flush()
eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
s.add(User(name="x"))
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 0)
s.flush()
self.assert_(len(s.identity_map) == 1)
eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
u = s.query(User).get(id_)
s.delete(u)
del u
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
s.flush()
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 0)
class DeprecatedQueryTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
@classmethod
def _expect_implicit_subquery(cls):
return assertions.expect_deprecated(
"Implicit coercion of SELECT and textual SELECT constructs into "
r"FROM clauses is deprecated; please call \.subquery\(\) on any "
"Core select or ORM Query object in order to produce a "
"subquery object."
)
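# The message asserted above also names the replacement: call .subquery()
# explicitly on the Core select or ORM Query instead of passing it raw,
# e.g. (sketch) sess.query(User).select_entity_from(users.select().subquery())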
def test_via_textasfrom_select_from(self):
User = self.classes.User
s = create_session()
with self._expect_implicit_subquery():
eq_(
s.query(User)
.select_from(
text("select * from users").columns(
id=Integer, name=String
)
)
.order_by(User.id)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_query_as_scalar(self):
User = self.classes.User
s = Session()
with assertions.expect_deprecated(
r"The Query.as_scalar\(\) method is deprecated and will "
"be removed in a future release."
):
s.query(User).as_scalar()
def test_select_entity_from_crit(self):
User, users = self.classes.User, self.tables.users
sel = users.select()
sess = create_session()
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.select_entity_from(sel)
.filter(User.id.in_([7, 8]))
.all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_select_entity_from_select(self):
User, users = self.classes.User, self.tables.users
sess = create_session()
with self._expect_implicit_subquery():
self.assert_compile(
sess.query(User.name).select_entity_from(
users.select().where(users.c.id > 5)
),
"SELECT anon_1.name AS anon_1_name FROM "
"(SELECT users.id AS id, users.name AS name FROM users "
"WHERE users.id > :id_1) AS anon_1",
)
def test_select_entity_from_q_statement(self):
User = self.classes.User
sess = create_session()
q = sess.query(User)
with self._expect_implicit_subquery():
q = sess.query(User).select_entity_from(q.statement)
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name "
"FROM (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE anon_1.name = :name_1",
)
def test_select_from_q_statement_no_aliasing(self):
User = self.classes.User
sess = create_session()
q = sess.query(User)
with self._expect_implicit_subquery():
q = sess.query(User).select_from(q.statement)
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE users.name = :name_1",
)
def test_from_alias_three(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select(
use_labels=True, order_by=[text("ulist.id"), addresses.c.id]
)
)
sess = create_session()
# better way. use select_entity_from()
def go():
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(query)
.options(contains_eager("addresses"))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_four(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
sess = create_session()
# same thing, but alias addresses, so that the adapter
# generated by select_entity_from() is wrapped within
# the adapter created by contains_eager()
adalias = addresses.alias()
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(adalias)
.select(use_labels=True, order_by=[text("ulist.id"), adalias.c.id])
)
def go():
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(query)
.options(contains_eager("addresses", alias=adalias))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_select(self):
users = self.tables.users
sess = create_session()
with self._expect_implicit_subquery():
self.assert_compile(
sess.query(users)
.select_entity_from(users.select())
.with_labels()
.statement,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, "
"(SELECT users.id AS id, users.name AS name FROM users) "
"AS anon_1",
)
def test_join(self):
users, Address, User = (
self.tables.users,
self.classes.Address,
self.classes.User,
)
# mapper(User, users, properties={"addresses": relationship(Address)})
# mapper(Address, addresses)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(sel)
.join("addresses")
.add_entity(Address)
.order_by(User.id)
.order_by(Address.id)
.all()
)
eq_(
result,
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="jack@bean.com", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@wood.com", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@bettyboop.com", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@lala.com", id=4),
),
],
)
adalias = aliased(Address)
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(sel)
.join(adalias, "addresses")
.add_entity(adalias)
.order_by(User.id)
.order_by(adalias.id)
.all()
)
eq_(
result,
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="jack@bean.com", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@wood.com", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@bettyboop.com", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@lala.com", id=4),
),
],
)
def test_more_joins(self):
(users, Keyword, User) = (
self.tables.users,
self.classes.Keyword,
self.classes.User,
)
sess = create_session()
sel = users.select(users.c.id.in_([7, 8]))
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.select_entity_from(sel)
.join("orders", "items", "keywords")
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[User(name="jack", id=7)],
)
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.select_entity_from(sel)
.join("orders", "items", "keywords", aliased=True)
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[User(name="jack", id=7)],
)
def test_join_no_order_by(self):
User, users = self.classes.User, self.tables.users
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
with self._expect_implicit_subquery():
eq_(
sess.query(User).select_entity_from(sel).all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_replace_with_eager(self):
users, Address, User = (
self.tables.users,
self.classes.Address,
self.classes.User,
)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
def go():
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel)
.order_by(User.id)
.all(),
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel)
.filter(User.id == 8)
.order_by(User.id)
.all(),
[
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel)
.order_by(User.id)[1],
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
),
)
self.assert_sql_count(testing.db, go, 1)
def test_onclause_conditional_adaption(self):
Item, Order, orders, order_items, User = (
self.classes.Item,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.User,
)
sess = Session()
oalias = orders.select()
with self._expect_implicit_subquery():
self.assert_compile(
sess.query(User)
.join(oalias, User.orders)
.join(
Item,
and_(
Order.id == order_items.c.order_id,
order_items.c.item_id == Item.id,
),
from_joinpoint=True,
),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN "
"(SELECT orders.id AS id, orders.user_id AS user_id, "
"orders.address_id AS address_id, orders.description "
"AS description, orders.isopen AS isopen FROM orders) "
"AS anon_1 ON users.id = anon_1.user_id JOIN items "
"ON anon_1.id = order_items.order_id "
"AND order_items.item_id = items.id",
use_default_dialect=True,
)
class DeprecatedInhTest(_poly_fixtures._Polymorphic):
def test_with_polymorphic(self):
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
with DeprecatedQueryTest._expect_implicit_subquery():
p_poly = with_polymorphic(Person, [Engineer], select([Person]))
is_true(
sa.inspect(p_poly).selectable.compare(select([Person]).subquery())
)
def test_multiple_adaption(self):
"""test that multiple filter() adapters get chained together "
and work correctly within a multiple-entry join()."""
Company = _poly_fixtures.Company
Machine = _poly_fixtures.Machine
Engineer = _poly_fixtures.Engineer
people = self.tables.people
engineers = self.tables.engineers
machines = self.tables.machines
sess = create_session()
mach_alias = machines.select()
with DeprecatedQueryTest._expect_implicit_subquery():
self.assert_compile(
sess.query(Company)
.join(people.join(engineers), Company.employees)
.join(mach_alias, Engineer.machines, from_joinpoint=True)
.filter(Engineer.name == "dilbert")
.filter(Machine.name == "foo"),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN (people "
"JOIN engineers ON people.person_id = "
"engineers.person_id) ON companies.company_id = "
"people.company_id JOIN "
"(SELECT machines.machine_id AS machine_id, "
"machines.name AS name, "
"machines.engineer_id AS engineer_id "
"FROM machines) AS anon_1 "
"ON engineers.person_id = anon_1.engineer_id "
"WHERE people.name = :name_1 AND anon_1.name = :name_2",
use_default_dialect=True,
)
class DeprecatedMapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_polymorphic_union_w_select(self):
users, addresses = self.tables.users, self.tables.addresses
with DeprecatedQueryTest._expect_implicit_subquery():
dep = polymorphic_union(
{"u": users.select(), "a": addresses.select()},
"type",
"bcjoin",
)
subq_version = polymorphic_union(
{
"u": users.select().subquery(),
"a": addresses.select().subquery(),
},
"type",
"bcjoin",
)
is_true(dep.compare(subq_version))
def test_cancel_order_by(self):
users, User = self.tables.users, self.classes.User
with testing.expect_deprecated(
"The Mapper.order_by parameter is deprecated, and will be "
"removed in a future release."
):
mapper(User, users, order_by=users.c.name.desc())
assert (
"order by users.name desc"
in str(create_session().query(User).statement).lower()
)
assert (
"order by"
not in str(
create_session().query(User).order_by(None).statement
).lower()
)
assert (
"order by users.name asc"
in str(
create_session()
.query(User)
.order_by(User.name.asc())
.statement
).lower()
)
eq_(
create_session().query(User).all(),
[
User(id=7, name="jack"),
User(id=9, name="fred"),
User(id=8, name="ed"),
User(id=10, name="chuck"),
],
)
eq_(
create_session().query(User).order_by(User.name).all(),
[
User(id=10, name="chuck"),
User(id=8, name="ed"),
User(id=9, name="fred"),
User(id=7, name="jack"),
],
)
def test_comparable(self):
users = self.tables.users
class extendedproperty(property):
attribute = 123
def method1(self):
return "method1"
from sqlalchemy.orm.properties import ColumnProperty
class UCComparator(ColumnProperty.Comparator):
__hash__ = None
def method1(self):
return "uccmethod1"
def method2(self, other):
return "method2"
def __eq__(self, other):
cls = self.prop.parent.class_
col = getattr(cls, "name")
if other is None:
return col is None
else:
return sa.func.upper(col) == sa.func.upper(other)
def map_(with_explicit_property):
class User(object):
@extendedproperty
def uc_name(self):
if self.name is None:
return None
return self.name.upper()
if with_explicit_property:
args = (UCComparator, User.uc_name)
else:
args = (UCComparator,)
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
mapper(
User,
users,
properties=dict(uc_name=sa.orm.comparable_property(*args)),
)
return User
for User in (map_(True), map_(False)):
sess = create_session()
sess.begin()
q = sess.query(User)
assert hasattr(User, "name")
assert hasattr(User, "uc_name")
eq_(User.uc_name.method1(), "method1")
eq_(User.uc_name.method2("x"), "method2")
assert_raises_message(
AttributeError,
"Neither 'extendedproperty' object nor 'UCComparator' "
"object associated with User.uc_name has an attribute "
"'nonexistent'",
getattr,
User.uc_name,
"nonexistent",
)
# test compile
assert not isinstance(User.uc_name == "jack", bool)
u = q.filter(User.uc_name == "JACK").one()
assert u.uc_name == "JACK"
assert u not in sess.dirty
u.name = "some user name"
eq_(u.name, "some user name")
assert u in sess.dirty
eq_(u.uc_name, "SOME USER NAME")
sess.flush()
sess.expunge_all()
q = sess.query(User)
u2 = q.filter(User.name == "some user name").one()
u3 = q.filter(User.uc_name == "SOME USER NAME").one()
assert u2 is u3
eq_(User.uc_name.attribute, 123)
sess.rollback()
def test_comparable_column(self):
users, User = self.tables.users, self.classes.User
class MyComparator(sa.orm.properties.ColumnProperty.Comparator):
__hash__ = None
def __eq__(self, other):
# lower case comparison
return func.lower(self.__clause_element__()) == func.lower(
other
)
def intersects(self, other):
# non-standard comparator
return self.__clause_element__().op("&=")(other)
mapper(
User,
users,
properties={
"name": sa.orm.column_property(
users.c.name, comparator_factory=MyComparator
)
},
)
assert_raises_message(
AttributeError,
"Neither 'InstrumentedAttribute' object nor "
"'MyComparator' object associated with User.name has "
"an attribute 'nonexistent'",
getattr,
User.name,
"nonexistent",
)
eq_(
str(
(User.name == "ed").compile(
dialect=sa.engine.default.DefaultDialect()
)
),
"lower(users.name) = lower(:lower_1)",
)
eq_(
str(
(User.name.intersects("ed")).compile(
dialect=sa.engine.default.DefaultDialect()
)
),
"users.name &= :name_1",
)
def test_info(self):
class MyComposite(object):
pass
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
for constructor, args in [(comparable_property, "foo")]:
obj = constructor(info={"x": "y"}, *args)
eq_(obj.info, {"x": "y"})
obj.info["q"] = "p"
eq_(obj.info, {"x": "y", "q": "p"})
obj = constructor(*args)
eq_(obj.info, {})
obj.info["q"] = "p"
eq_(obj.info, {"q": "p"})
def test_add_property(self):
users = self.tables.users
assert_col = []
class User(fixtures.ComparableEntity):
def _get_name(self):
assert_col.append(("get", self._name))
return self._name
def _set_name(self, name):
assert_col.append(("set", name))
self._name = name
name = property(_get_name, _set_name)
def _uc_name(self):
if self._name is None:
return None
return self._name.upper()
uc_name = property(_uc_name)
uc_name2 = property(_uc_name)
m = mapper(User, users)
class UCComparator(PropComparator):
__hash__ = None
def __eq__(self, other):
cls = self.prop.parent.class_
col = getattr(cls, "name")
if other is None:
return col is None
else:
return func.upper(col) == func.upper(other)
m.add_property("_name", deferred(users.c.name))
m.add_property("name", synonym("_name"))
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
m.add_property("uc_name", comparable_property(UCComparator))
m.add_property(
"uc_name2", comparable_property(UCComparator, User.uc_name2)
)
sess = create_session(autocommit=False)
assert sess.query(User).get(7)
u = sess.query(User).filter_by(name="jack").one()
def go():
eq_(u.name, "jack")
eq_(u.uc_name, "JACK")
eq_(u.uc_name2, "JACK")
eq_(assert_col, [("get", "jack")], str(assert_col))
self.sql_count_(1, go)
def test_kwarg_accepted(self):
class DummyComposite(object):
def __init__(self, x, y):
pass
class MyFactory(PropComparator):
pass
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
for args in ((comparable_property,),):
fn = args[0]
args = args[1:]
fn(comparator_factory=MyFactory, *args)
def test_merge_synonym_comparable(self):
users = self.tables.users
class User(object):
class Comparator(PropComparator):
pass
def _getValue(self):
return self._value
def _setValue(self, value):
setattr(self, "_value", value)
value = property(_getValue, _setValue)
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
mapper(
User,
users,
properties={
"uid": synonym("id"),
"foobar": comparable_property(User.Comparator, User.value),
},
)
sess = create_session()
u = User()
u.name = "ed"
sess.add(u)
sess.flush()
sess.expunge(u)
sess.merge(u)
class DeprecatedDeclTest(fixtures.TestBase):
@testing.provide_metadata
def test_comparable_using(self):
class NameComparator(sa.orm.PropComparator):
@property
def upperself(self):
cls = self.prop.parent.class_
col = getattr(cls, "name")
return sa.func.upper(col)
def operate(self, op, other, **kw):
return op(self.upperself, other, **kw)
Base = declarative_base(metadata=self.metadata)
with testing.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
)
name = Column("name", String(50))
@comparable_using(NameComparator)
@property
def uc_name(self):
return self.name is not None and self.name.upper() or None
Base.metadata.create_all()
sess = create_session()
u1 = User(name="someuser")
eq_(u1.name, "someuser", u1.name)
eq_(u1.uc_name, "SOMEUSER", u1.uc_name)
sess.add(u1)
sess.flush()
sess.expunge_all()
rt = sess.query(User).filter(User.uc_name == "SOMEUSER").one()
eq_(rt, u1)
sess.expunge_all()
rt = sess.query(User).filter(User.uc_name.startswith("SOMEUSE")).one()
eq_(rt, u1)
class DeprecatedMapperExtensionTest(_fixtures.FixtureTest):
"""Superseded by MapperEventsTest - test backwards
compatibility of MapperExtension."""
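# For reference, the event-based replacement for a MapperExtension hook is
# registered with event.listens_for(); a minimal sketch (names illustrative):
#
#     @event.listens_for(User, "before_insert")
#     def receive_before_insert(mapper, connection, target):
#         ...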
run_inserts = None
def extension(self):
methods = []
class Ext(MapperExtension):
def instrument_class(self, mapper, cls):
methods.append("instrument_class")
return EXT_CONTINUE
def init_instance(
self, mapper, class_, oldinit, instance, args, kwargs
):
methods.append("init_instance")
return EXT_CONTINUE
def init_failed(
self, mapper, class_, oldinit, instance, args, kwargs
):
methods.append("init_failed")
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
methods.append("reconstruct_instance")
return EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
methods.append("before_insert")
return EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
methods.append("after_insert")
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
methods.append("before_update")
return EXT_CONTINUE
def after_update(self, mapper, connection, instance):
methods.append("after_update")
return EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
methods.append("before_delete")
return EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
methods.append("after_delete")
return EXT_CONTINUE
return Ext, methods
def test_basic(self):
"""test that common user-defined methods get called."""
User, users = self.classes.User, self.tables.users
Ext, methods = self.extension()
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(User, users, extension=Ext())
sess = create_session()
u = User(name="u1")
sess.add(u)
sess.flush()
u = sess.query(User).populate_existing().get(u.id)
sess.expunge_all()
u = sess.query(User).get(u.id)
u.name = "u1 changed"
sess.flush()
sess.delete(u)
sess.flush()
eq_(
methods,
[
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
"reconstruct_instance",
"before_update",
"after_update",
"before_delete",
"after_delete",
],
)
def test_inheritance(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
Ext, methods = self.extension()
class AdminUser(User):
pass
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(User, users, extension=Ext())
mapper(
AdminUser,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
sess = create_session()
am = AdminUser(name="au1", email_address="au1@e1")
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = "au1 changed"
sess.flush()
sess.delete(am)
sess.flush()
eq_(
methods,
[
"instrument_class",
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
"reconstruct_instance",
"before_update",
"after_update",
"before_delete",
"after_delete",
],
)
def test_before_after_only_collection(self):
"""before_update is called on parent for collection modifications,
after_update is called even if no columns were updated.
"""
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
Ext1, methods1 = self.extension()
Ext2, methods2 = self.extension()
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(
Item,
items,
extension=Ext1(),
properties={
"keywords": relationship(Keyword, secondary=item_keywords)
},
)
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(Keyword, keywords, extension=Ext2())
sess = create_session()
i1 = Item(description="i1")
k1 = Keyword(name="k1")
sess.add(i1)
sess.add(k1)
sess.flush()
eq_(
methods1,
[
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
],
)
eq_(
methods2,
[
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
],
)
del methods1[:]
del methods2[:]
i1.keywords.append(k1)
sess.flush()
eq_(methods1, ["before_update", "after_update"])
eq_(methods2, [])
def test_inheritance_with_dupes(self):
"""Inheritance with the same extension instance on both mappers."""
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
Ext, methods = self.extension()
class AdminUser(User):
pass
ext = Ext()
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(User, users, extension=ext)
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents"
):
mapper(
AdminUser,
addresses,
inherits=User,
extension=ext,
properties={"address_id": addresses.c.id},
)
sess = create_session()
am = AdminUser(name="au1", email_address="au1@e1")
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = "au1 changed"
sess.flush()
sess.delete(am)
sess.flush()
eq_(
methods,
[
"instrument_class",
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
"reconstruct_instance",
"before_update",
"after_update",
"before_delete",
"after_delete",
],
)
def test_unnecessary_methods_not_evented(self):
users = self.tables.users
class MyExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
pass
class Foo(object):
pass
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
):
m = mapper(Foo, users, extension=MyExtension())
assert not m.class_manager.dispatch.load
assert not m.dispatch.before_update
assert len(m.dispatch.before_insert) == 1
class DeprecatedSessionExtensionTest(_fixtures.FixtureTest):
run_inserts = None
def test_extension(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
log = []
class MyExt(SessionExtension):
def before_commit(self, session):
log.append("before_commit")
def after_commit(self, session):
log.append("after_commit")
def after_rollback(self, session):
log.append("after_rollback")
def before_flush(self, session, flush_context, objects):
log.append("before_flush")
def after_flush(self, session, flush_context):
log.append("after_flush")
def after_flush_postexec(self, session, flush_context):
log.append("after_flush_postexec")
def after_begin(self, session, transaction, connection):
log.append("after_begin")
def after_attach(self, session, instance):
log.append("after_attach")
def after_bulk_update(self, session, query, query_context, result):
log.append("after_bulk_update")
def after_bulk_delete(self, session, query, query_context, result):
log.append("after_bulk_delete")
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
"SessionExtension.after_commit is deprecated",
"SessionExtension.after_begin is deprecated",
"SessionExtension.after_attach is deprecated",
"SessionExtension.before_flush is deprecated",
"SessionExtension.after_flush is deprecated",
"SessionExtension.after_flush_postexec is deprecated",
"SessionExtension.after_rollback is deprecated",
"SessionExtension.after_bulk_update is deprecated",
"SessionExtension.after_bulk_delete is deprecated",
):
sess = create_session(extension=MyExt())
u = User(name="u1")
sess.add(u)
sess.flush()
assert log == [
"after_attach",
"before_flush",
"after_begin",
"after_flush",
"after_flush_postexec",
"before_commit",
"after_commit",
]
log = []
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
"SessionExtension.after_commit is deprecated",
"SessionExtension.after_begin is deprecated",
"SessionExtension.after_attach is deprecated",
"SessionExtension.before_flush is deprecated",
"SessionExtension.after_flush is deprecated",
"SessionExtension.after_flush_postexec is deprecated",
"SessionExtension.after_rollback is deprecated",
"SessionExtension.after_bulk_update is deprecated",
"SessionExtension.after_bulk_delete is deprecated",
):
sess = create_session(autocommit=False, extension=MyExt())
u = User(name="u1")
sess.add(u)
sess.flush()
assert log == [
"after_attach",
"before_flush",
"after_begin",
"after_flush",
"after_flush_postexec",
]
log = []
u.name = "ed"
sess.commit()
assert log == [
"before_commit",
"before_flush",
"after_flush",
"after_flush_postexec",
"after_commit",
]
log = []
sess.commit()
assert log == ["before_commit", "after_commit"]
log = []
sess.query(User).delete()
assert log == ["after_begin", "after_bulk_delete"]
log = []
sess.query(User).update({"name": "foo"})
assert log == ["after_bulk_update"]
log = []
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
"SessionExtension.after_commit is deprecated",
"SessionExtension.after_begin is deprecated",
"SessionExtension.after_attach is deprecated",
"SessionExtension.before_flush is deprecated",
"SessionExtension.after_flush is deprecated",
"SessionExtension.after_flush_postexec is deprecated",
"SessionExtension.after_rollback is deprecated",
"SessionExtension.after_bulk_update is deprecated",
"SessionExtension.after_bulk_delete is deprecated",
):
sess = create_session(
autocommit=False, extension=MyExt(), bind=testing.db
)
sess.connection()
assert log == ["after_begin"]
sess.close()
def test_multiple_extensions(self):
User, users = self.classes.User, self.tables.users
log = []
class MyExt1(SessionExtension):
def before_commit(self, session):
log.append("before_commit_one")
class MyExt2(SessionExtension):
def before_commit(self, session):
log.append("before_commit_two")
mapper(User, users)
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
):
sess = create_session(extension=[MyExt1(), MyExt2()])
u = User(name="u1")
sess.add(u)
sess.flush()
assert log == ["before_commit_one", "before_commit_two"]
def test_unnecessary_methods_not_evented(self):
class MyExtension(SessionExtension):
def before_commit(self, session):
pass
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated.",
):
s = Session(extension=MyExtension())
assert not s.dispatch.after_commit
assert len(s.dispatch.before_commit) == 1
class DeprecatedAttributeExtensionTest1(fixtures.ORMTest):
def test_extension_commit_attr(self):
"""test that an extension which commits attribute history
maintains the end-result history.
This won't work in conjunction with some unitofwork extensions.
"""
class Foo(fixtures.BasicEntity):
pass
class Bar(fixtures.BasicEntity):
pass
class ReceiveEvents(AttributeExtension):
def __init__(self, key):
self.key = key
def append(self, state, child, initiator):
if commit:
state._commit_all(state.dict)
return child
def remove(self, state, child, initiator):
if commit:
state._commit_all(state.dict)
return child
def set(self, state, child, oldchild, initiator):
if commit:
state._commit_all(state.dict)
return child
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
b1, b2, b3, b4 = Bar(id="b1"), Bar(id="b2"), Bar(id="b3"), Bar(id="b4")
def loadcollection(state, passive):
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
return [b1, b2]
def loadscalar(state, passive):
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
return b2
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"bars",
uselist=True,
useobject=True,
callable_=loadcollection,
extension=[ReceiveEvents("bars")],
)
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"bar",
uselist=False,
useobject=True,
callable_=loadscalar,
extension=[ReceiveEvents("bar")],
)
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"scalar",
uselist=False,
useobject=False,
extension=[ReceiveEvents("scalar")],
)
def create_hist():
def hist(key, fn, *arg):
attributes.instance_state(f1)._commit_all(
attributes.instance_dict(f1)
)
fn(*arg)
histories.append(attributes.get_history(f1, key))
f1 = Foo()
hist("bars", f1.bars.append, b3)
hist("bars", f1.bars.append, b4)
hist("bars", f1.bars.remove, b2)
hist("bar", setattr, f1, "bar", b3)
hist("bar", setattr, f1, "bar", None)
hist("bar", setattr, f1, "bar", b4)
hist("scalar", setattr, f1, "scalar", 5)
hist("scalar", setattr, f1, "scalar", None)
hist("scalar", setattr, f1, "scalar", 4)
histories = []
commit = False
create_hist()
without_commit = list(histories)
histories[:] = []
commit = True
create_hist()
with_commit = histories
for without, with_ in zip(without_commit, with_commit):
woc = without
wic = with_
eq_(woc, wic)
def test_extension_lazyload_assertion(self):
class Foo(fixtures.BasicEntity):
pass
class Bar(fixtures.BasicEntity):
pass
class ReceiveEvents(AttributeExtension):
def append(self, state, child, initiator):
state.obj().bars
return child
def remove(self, state, child, initiator):
state.obj().bars
return child
def set(self, state, child, oldchild, initiator):
return child
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
bar1, bar2, bar3 = [Bar(id=1), Bar(id=2), Bar(id=3)]
def func1(state, passive):
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
return [bar1, bar2, bar3]
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"bars",
uselist=True,
callable_=func1,
useobject=True,
extension=[ReceiveEvents()],
)
attributes.register_attribute(
Bar, "foos", uselist=True, useobject=True, backref="bars"
)
x = Foo()
assert_raises(AssertionError, Bar(id=4).foos.append, x)
x.bars
b = Bar(id=4)
b.foos.append(x)
attributes.instance_state(x)._expire_attributes(
attributes.instance_dict(x), ["bars"]
)
assert_raises(AssertionError, b.foos.remove, x)
def test_scalar_listener(self):
# listeners on ScalarAttributeImpl aren't used normally. test that
# they work for the benefit of user extensions
class Foo(object):
pass
results = []
class ReceiveEvents(AttributeExtension):
def append(self, state, child, initiator):
assert False
def remove(self, state, child, initiator):
results.append(("remove", state.obj(), child))
def set(self, state, child, oldchild, initiator):
results.append(("set", state.obj(), child, oldchild))
return child
instrumentation.register_class(Foo)
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"x",
uselist=False,
useobject=False,
extension=ReceiveEvents(),
)
f = Foo()
f.x = 5
f.x = 17
del f.x
eq_(
results,
[
("set", f, 5, attributes.NEVER_SET),
("set", f, 17, 5),
("remove", f, 17),
],
)
def test_cascading_extensions(self):
t1 = Table(
"t1",
MetaData(),
Column("id", Integer, primary_key=True),
Column("type", String(40)),
Column("data", String(50)),
)
ext_msg = []
class Ex1(AttributeExtension):
def set(self, state, value, oldvalue, initiator):
ext_msg.append("Ex1 %r" % value)
return "ex1" + value
class Ex2(AttributeExtension):
def set(self, state, value, oldvalue, initiator):
ext_msg.append("Ex2 %r" % value)
return "ex2" + value
class A(fixtures.BasicEntity):
pass
class B(A):
pass
class C(B):
pass
with testing.expect_deprecated(
"AttributeExtension is deprecated in favor of the "
"AttributeEvents listener interface. "
"The column_property.extension parameter"
):
mapper(
A,
t1,
polymorphic_on=t1.c.type,
polymorphic_identity="a",
properties={
"data": column_property(t1.c.data, extension=Ex1())
},
)
mapper(B, polymorphic_identity="b", inherits=A)
with testing.expect_deprecated(
"AttributeExtension is deprecated in favor of the "
"AttributeEvents listener interface. "
"The column_property.extension parameter"
):
mapper(
C,
polymorphic_identity="c",
inherits=B,
properties={
"data": column_property(t1.c.data, extension=Ex2())
},
)
with testing.expect_deprecated(
"AttributeExtension.set is deprecated. "
):
configure_mappers()
a1 = A(data="a1")
b1 = B(data="b1")
c1 = C(data="c1")
eq_(a1.data, "ex1a1")
eq_(b1.data, "ex1b1")
eq_(c1.data, "ex2c1")
a1.data = "a2"
b1.data = "b2"
c1.data = "c2"
eq_(a1.data, "ex1a2")
eq_(b1.data, "ex1b2")
eq_(c1.data, "ex2c2")
eq_(
ext_msg,
[
"Ex1 'a1'",
"Ex1 'b1'",
"Ex2 'c1'",
"Ex1 'a2'",
"Ex1 'b2'",
"Ex2 'c2'",
],
)
class DeprecatedOptionAllTest(OptionsPathTest, _fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
def _mapper_fixture_one(self):
users, User, addresses, Address, orders, Order = (
self.tables.users,
self.classes.User,
self.tables.addresses,
self.classes.Address,
self.tables.orders,
self.classes.Order,
)
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
mapper(
User,
users,
properties={
"addresses": relationship(Address),
"orders": relationship(Order),
},
)
mapper(Address, addresses)
mapper(
Order,
orders,
properties={
"items": relationship(Item, secondary=self.tables.order_items)
},
)
mapper(
Keyword,
keywords,
properties={
"keywords": column_property(keywords.c.name + "some keyword")
},
)
mapper(
Item,
items,
properties=dict(
keywords=relationship(Keyword, secondary=item_keywords)
),
)
def _assert_eager_with_entity_exception(
self, entity_list, options, message
):
assert_raises_message(
sa.exc.ArgumentError,
message,
create_session().query(*entity_list).options,
*options
)
def test_option_against_nonexistent_twolevel_all(self):
self._mapper_fixture_one()
Item = self.classes.Item
with testing.expect_deprecated(
r"The joinedload_all\(\) function is deprecated, and "
"will be removed in a future release. "
r"Please use method chaining with joinedload\(\)"
):
self._assert_eager_with_entity_exception(
[Item],
(joinedload_all("keywords.foo"),),
'Can\'t find property named \\"foo\\" on mapped class '
"Keyword->keywords in this Query.",
)
def test_all_path_vs_chained(self):
self._mapper_fixture_one()
User = self.classes.User
Order = self.classes.Order
Item = self.classes.Item
with testing.expect_deprecated(
r"The joinedload_all\(\) function is deprecated, and "
"will be removed in a future release. "
r"Please use method chaining with joinedload\(\)"
):
l1 = joinedload_all("orders.items.keywords")
sess = Session()
q = sess.query(User)
self._assert_path_result(
l1,
q,
[
(User, "orders"),
(User, "orders", Order, "items"),
(User, "orders", Order, "items", Item, "keywords"),
],
)
l2 = joinedload("orders").joinedload("items").joinedload("keywords")
self._assert_path_result(
l2,
q,
[
(User, "orders"),
(User, "orders", Order, "items"),
(User, "orders", Order, "items", Item, "keywords"),
],
)
def test_subqueryload_mapper_order_by(self):
users, User, Address, addresses = (
self.tables.users,
self.classes.User,
self.classes.Address,
self.tables.addresses,
)
mapper(Address, addresses)
with testing.expect_deprecated(
".*Mapper.order_by parameter is deprecated"
):
mapper(
User,
users,
properties={
"addresses": relationship(
Address, lazy="subquery", order_by=addresses.c.id
)
},
order_by=users.c.id.desc(),
)
sess = create_session()
q = sess.query(User)
result = q.limit(2).all()
eq_(result, list(reversed(self.static.user_address_result[2:4])))
def test_selectinload_mapper_order_by(self):
users, User, Address, addresses = (
self.tables.users,
self.classes.User,
self.classes.Address,
self.tables.addresses,
)
mapper(Address, addresses)
with testing.expect_deprecated(
".*Mapper.order_by parameter is deprecated"
):
mapper(
User,
users,
properties={
"addresses": relationship(
Address, lazy="selectin", order_by=addresses.c.id
)
},
order_by=users.c.id.desc(),
)
sess = create_session()
q = sess.query(User)
result = q.limit(2).all()
eq_(result, list(reversed(self.static.user_address_result[2:4])))
def test_join_mapper_order_by(self):
"""test that mapper-level order_by is adapted to a selectable."""
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
".*Mapper.order_by parameter is deprecated"
):
mapper(User, users, order_by=users.c.id)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
with DeprecatedQueryTest._expect_implicit_subquery():
eq_(
sess.query(User).select_entity_from(sel).all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_defer_addtl_attrs(self):
users, User, Address, addresses = (
self.tables.users,
self.classes.User,
self.classes.Address,
self.tables.addresses,
)
mapper(Address, addresses)
mapper(
User,
users,
properties={
"addresses": relationship(
Address, lazy="selectin", order_by=addresses.c.id
)
},
)
sess = create_session()
with testing.expect_deprecated(
r"The \*addl_attrs on orm.defer is deprecated. "
"Please use method chaining"
):
sess.query(User).options(defer("addresses", "email_address"))
with testing.expect_deprecated(
r"The \*addl_attrs on orm.undefer is deprecated. "
"Please use method chaining"
):
sess.query(User).options(undefer("addresses", "email_address"))
class LegacyLockModeTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
mapper(User, users)
def _assert_legacy(self, arg, read=False, nowait=False):
User = self.classes.User
s = Session()
with testing.expect_deprecated(
r"The Query.with_lockmode\(\) method is deprecated"
):
q = s.query(User).with_lockmode(arg)
sel = q._compile_context().statement
if arg is None:
assert q._for_update_arg is None
assert sel._for_update_arg is None
return
assert q._for_update_arg.read is read
assert q._for_update_arg.nowait is nowait
assert sel._for_update_arg.read is read
assert sel._for_update_arg.nowait is nowait
def test_false_legacy(self):
self._assert_legacy(None)
def test_plain_legacy(self):
self._assert_legacy("update")
def test_nowait_legacy(self):
self._assert_legacy("update_nowait", nowait=True)
def test_read_legacy(self):
self._assert_legacy("read", read=True)
def test_unknown_legacy_lock_mode(self):
User = self.classes.User
sess = Session()
with testing.expect_deprecated(
r"The Query.with_lockmode\(\) method is deprecated"
):
assert_raises_message(
exc.ArgumentError,
"Unknown with_lockmode argument: 'unknown_mode'",
sess.query(User.id).with_lockmode,
"unknown_mode",
)
class InstrumentationTest(fixtures.ORMTest):
def test_dict_subclass4(self):
# tests #2654
with testing.expect_deprecated(
r"The collection.converter\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
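# (The message asserted above points at AttributeEvents; in newer SQLAlchemy
# the role of @collection.converter is covered by the bulk_replace event.)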
class MyDict(collections.MappedCollection):
def __init__(self):
super(MyDict, self).__init__(lambda value: "k%d" % value)
@collection.converter
def _convert(self, dictlike):
for key, value in dictlike.items():
yield value + 5
class Foo(object):
pass
instrumentation.register_class(Foo)
attributes.register_attribute(
Foo, "attr", uselist=True, typecallable=MyDict, useobject=True
)
f = Foo()
f.attr = {"k1": 1, "k2": 2}
eq_(f.attr, {"k7": 7, "k6": 6})
def test_name_setup(self):
with testing.expect_deprecated(
r"The collection.converter\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class Base(object):
@collection.iterator
def base_iterate(self, x):
return "base_iterate"
@collection.appender
def base_append(self, x):
return "base_append"
@collection.converter
def base_convert(self, x):
return "base_convert"
@collection.remover
def base_remove(self, x):
return "base_remove"
from sqlalchemy.orm.collections import _instrument_class
_instrument_class(Base)
eq_(Base._sa_remover(Base(), 5), "base_remove")
eq_(Base._sa_appender(Base(), 5), "base_append")
eq_(Base._sa_iterator(Base(), 5), "base_iterate")
eq_(Base._sa_converter(Base(), 5), "base_convert")
with testing.expect_deprecated(
r"The collection.converter\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class Sub(Base):
@collection.converter
def base_convert(self, x):
return "sub_convert"
@collection.remover
def sub_remove(self, x):
return "sub_remove"
_instrument_class(Sub)
eq_(Sub._sa_appender(Sub(), 5), "base_append")
eq_(Sub._sa_remover(Sub(), 5), "sub_remove")
eq_(Sub._sa_iterator(Sub(), 5), "base_iterate")
eq_(Sub._sa_converter(Sub(), 5), "sub_convert")
def test_link_event(self):
canary = []
with testing.expect_deprecated(
r"The collection.linker\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class Collection(list):
@collection.linker
def _on_link(self, obj):
canary.append(obj)
class Foo(object):
pass
instrumentation.register_class(Foo)
attributes.register_attribute(
Foo, "attr", uselist=True, typecallable=Collection, useobject=True
)
f1 = Foo()
f1.attr.append(3)
eq_(canary, [f1.attr._sa_adapter])
adapter_1 = f1.attr._sa_adapter
l2 = Collection()
f1.attr = l2
eq_(canary, [adapter_1, f1.attr._sa_adapter, None])
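# The next two classes exercise the deprecated mapper(..., non_primary=True)
# parameter: first against each relationship loader strategy (select, joined,
# selectin, subquery), then for identity-key behavior and error cases.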
class NonPrimaryRelationshipLoaderTest(_fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
def test_selectload(self):
"""tests lazy loading with two relationships simultaneously,
from the same table, using aliases. """
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(Address, lazy=True),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="select",
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="select",
),
),
)
self._run_double_test(10)
def test_joinedload(self):
"""Eager loading with two relationships simultaneously,
from the same table, using aliases."""
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="joined",
order_by=openorders.c.id,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="joined",
order_by=closedorders.c.id,
),
),
)
self._run_double_test(1)
def test_selectin(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="selectin", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="selectin",
order_by=openorders.c.id,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="selectin",
order_by=closedorders.c.id,
),
),
)
self._run_double_test(4)
def test_subqueryload(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="subquery", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="subquery",
order_by=openorders.c.id,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="subquery",
order_by=closedorders.c.id,
),
),
)
self._run_double_test(4)
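    # The count passed to _run_double_test() is the expected number of SQL
    # statements for the loader strategy under test: 1 for joined, 4 for
    # selectin/subquery, 10 for plain lazy "select" loading.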
def _run_double_test(self, count):
User, Address, Order, Item = self.classes(
"User", "Address", "Order", "Item"
)
q = create_session().query(User).order_by(User.id)
def go():
eq_(
[
User(
id=7,
addresses=[Address(id=1)],
open_orders=[Order(id=3)],
closed_orders=[Order(id=1), Order(id=5)],
),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
open_orders=[],
closed_orders=[],
),
User(
id=9,
addresses=[Address(id=5)],
open_orders=[Order(id=4)],
closed_orders=[Order(id=2)],
),
User(id=10),
],
q.all(),
)
self.assert_sql_count(testing.db, go, count)
sess = create_session()
user = sess.query(User).get(7)
closed_mapper = User.closed_orders.entity
open_mapper = User.open_orders.entity
eq_(
[Order(id=1), Order(id=5)],
create_session()
.query(closed_mapper)
.with_parent(user, property="closed_orders")
.all(),
)
eq_(
[Order(id=3)],
create_session()
.query(open_mapper)
.with_parent(user, property="open_orders")
.all(),
)
class NonPrimaryMapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_non_primary_identity_class(self):
User = self.classes.User
users, addresses = self.tables.users, self.tables.addresses
class AddressUser(User):
pass
mapper(User, users, polymorphic_identity="user")
m2 = mapper(
AddressUser,
addresses,
inherits=User,
polymorphic_identity="address",
properties={"address_id": addresses.c.id},
)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
m3 = mapper(AddressUser, addresses, non_primary=True)
assert m3._identity_class is m2._identity_class
eq_(
m2.identity_key_from_instance(AddressUser()),
m3.identity_key_from_instance(AddressUser()),
)
def test_illegal_non_primary(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
mapper(
User,
users,
non_primary=True,
properties={"addresses": relationship(Address)},
)
assert_raises_message(
sa.exc.ArgumentError,
"Attempting to assign a new relationship 'addresses' "
"to a non-primary mapper on class 'User'",
configure_mappers,
)
def test_illegal_non_primary_2(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
assert_raises_message(
sa.exc.InvalidRequestError,
"Configure a primary mapper first",
mapper,
User,
users,
non_primary=True,
)
def test_illegal_non_primary_3(self):
users, addresses = self.tables.users, self.tables.addresses
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, users)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
assert_raises_message(
sa.exc.InvalidRequestError,
"Configure a primary mapper first",
mapper,
Sub,
addresses,
non_primary=True,
)
| 32.066801
| 79
| 0.534435
|
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy.ext.declarative import comparable_using
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import AttributeExtension
from sqlalchemy.orm import attributes
from sqlalchemy.orm import collections
from sqlalchemy.orm import column_property
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm import composite
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import create_session
from sqlalchemy.orm import defer
from sqlalchemy.orm import deferred
from sqlalchemy.orm import EXT_CONTINUE
from sqlalchemy.orm import identity
from sqlalchemy.orm import instrumentation
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import mapper
from sqlalchemy.orm import MapperExtension
from sqlalchemy.orm import PropComparator
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import SessionExtension
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import synonym
from sqlalchemy.orm import undefer
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.orm.collections import collection
from sqlalchemy.orm.util import polymorphic_union
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assertions
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_true
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.util.compat import pypy
from . import _fixtures
from .inheritance import _poly_fixtures
from .test_options import PathTest as OptionsPathTest
from .test_transaction import _LocalFixture
class DeprecationWarningsTest(fixtures.DeclarativeMappedTest):
run_setup_classes = "each"
run_setup_mappers = "each"
run_define_tables = "each"
run_create_tables = None
def test_attribute_extension(self):
class SomeExtension(AttributeExtension):
def append(self, obj, value, initiator):
pass
def remove(self, obj, value, initiator):
pass
def set(self, obj, value, oldvalue, initiator):
pass
with assertions.expect_deprecated(
".*The column_property.extension parameter will be removed in a "
"future release."
):
class Foo(self.DeclarativeBasic):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
foo = column_property(
Column("q", Integer), extension=SomeExtension()
)
with assertions.expect_deprecated(
"AttributeExtension.append is deprecated. The "
"AttributeExtension class will be removed in a future release.",
"AttributeExtension.remove is deprecated. The "
"AttributeExtension class will be removed in a future release.",
"AttributeExtension.set is deprecated. The "
"AttributeExtension class will be removed in a future release.",
):
configure_mappers()
def test_attribute_extension_parameter(self):
class SomeExtension(AttributeExtension):
def append(self, obj, value, initiator):
pass
with assertions.expect_deprecated(
".*The relationship.extension parameter will be removed in a "
"future release."
):
relationship("Bar", extension=SomeExtension)
with assertions.expect_deprecated(
".*The column_property.extension parameter will be removed in a "
"future release."
):
column_property(Column("q", Integer), extension=SomeExtension)
with assertions.expect_deprecated(
".*The composite.extension parameter will be removed in a "
"future release."
):
composite("foo", extension=SomeExtension)
def test_session_extension(self):
class SomeExtension(SessionExtension):
def after_commit(self, session):
pass
def after_rollback(self, session):
pass
def before_flush(self, session, flush_context, instances):
pass
with assertions.expect_deprecated(
".*The Session.extension parameter will be removed",
"SessionExtension.after_commit is deprecated. "
"The SessionExtension class",
"SessionExtension.before_flush is deprecated. "
"The SessionExtension class",
"SessionExtension.after_rollback is deprecated. "
"The SessionExtension class",
):
Session(extension=SomeExtension())
def test_mapper_extension(self):
class SomeExtension(MapperExtension):
def init_instance(
self, mapper, class_, oldinit, instance, args, kwargs
):
pass
def init_failed(
self, mapper, class_, oldinit, instance, args, kwargs
):
pass
with assertions.expect_deprecated(
"MapperExtension.init_instance is deprecated. "
"The MapperExtension class",
"MapperExtension.init_failed is deprecated. "
"The MapperExtension class",
".*The mapper.extension parameter will be removed",
):
class Foo(self.DeclarativeBasic):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
__mapper_args__ = {"extension": SomeExtension()}
def test_session_weak_identity_map(self):
with testing.expect_deprecated(
".*Session.weak_identity_map parameter as well as the"
):
s = Session(weak_identity_map=True)
is_(s._identity_cls, identity.WeakInstanceDict)
with assertions.expect_deprecated(
"The Session.weak_identity_map parameter as well as"
):
s = Session(weak_identity_map=False)
is_(s._identity_cls, identity.StrongInstanceDict)
s = Session()
is_(s._identity_cls, identity.WeakInstanceDict)
def test_session_prune(self):
s = Session()
with assertions.expect_deprecated(
r"The Session.prune\(\) method is deprecated along with "
"Session.weak_identity_map"
):
s.prune()
def test_session_enable_transaction_accounting(self):
with assertions.expect_deprecated(
"the Session._enable_transaction_accounting parameter is "
"deprecated"
):
Session(_enable_transaction_accounting=False)
def test_session_is_modified(self):
class Foo(self.DeclarativeBasic):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
f1 = Foo()
s = Session()
with assertions.expect_deprecated(
"The Session.is_modified.passive flag is deprecated"
):
# so that the warning emits
s.is_modified(f1, passive=True)
class DeprecatedAccountingFlagsTest(_LocalFixture):
def test_rollback_no_accounting(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The Session._enable_transaction_accounting parameter"
):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name="ed")
sess.add(u1)
sess.commit()
u1.name = "edwardo"
sess.rollback()
testing.db.execute(
users.update(users.c.name == "ed").values(name="edward")
)
assert u1.name == "edwardo"
sess.expire_all()
assert u1.name == "edward"
def test_commit_no_accounting(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The Session._enable_transaction_accounting parameter"
):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name="ed")
sess.add(u1)
sess.commit()
u1.name = "edwardo"
sess.rollback()
testing.db.execute(
users.update(users.c.name == "ed").values(name="edward")
)
assert u1.name == "edwardo"
sess.commit()
assert testing.db.execute(select([users.c.name])).fetchall() == [
("edwardo",)
]
assert u1.name == "edwardo"
sess.delete(u1)
sess.commit()
def test_preflush_no_accounting(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The Session._enable_transaction_accounting parameter"
):
sess = Session(
_enable_transaction_accounting=False,
autocommit=True,
autoflush=False,
)
u1 = User(name="ed")
sess.add(u1)
sess.flush()
sess.begin()
u1.name = "edwardo"
u2 = User(name="some other user")
sess.add(u2)
sess.rollback()
sess.begin()
assert testing.db.execute(select([users.c.name])).fetchall() == [
("ed",)
]
class DeprecatedSessionFeatureTest(_fixtures.FixtureTest):
run_inserts = None
def test_fast_discard_race(self):
# test issue #4068
users, User = self.tables.users, self.classes.User
mapper(User, users)
with testing.expect_deprecated(".*identity map are deprecated"):
sess = Session(weak_identity_map=False)
u1 = User(name="u1")
sess.add(u1)
sess.commit()
u1_state = u1._sa_instance_state
sess.identity_map._dict.pop(u1_state.key)
ref = u1_state.obj
u1_state.obj = lambda: None
u2 = sess.query(User).first()
u1_state._cleanup(ref)
u3 = sess.query(User).first()
is_(u2, u3)
u2_state = u2._sa_instance_state
assert sess.identity_map.contains_state(u2._sa_instance_state)
ref = u2_state.obj
u2_state.obj = lambda: None
u2_state._cleanup(ref)
assert not sess.identity_map.contains_state(u2._sa_instance_state)
def test_is_modified_passive_on(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
s = Session()
u = User(name="fred", addresses=[Address(email_address="foo")])
s.add(u)
s.commit()
u.id
def go():
assert not s.is_modified(u, passive=True)
with testing.expect_deprecated(
".*Session.is_modified.passive flag is deprecated "
):
self.assert_sql_count(testing.db, go, 0)
u.name = "newname"
def go():
assert s.is_modified(u, passive=True)
with testing.expect_deprecated(
".*Session.is_modified.passive flag is deprecated "
):
self.assert_sql_count(testing.db, go, 0)
class StrongIdentityMapTest(_fixtures.FixtureTest):
run_inserts = None
def _strong_ident_fixture(self):
with testing.expect_deprecated(
".*Session.weak_identity_map parameter as well as the"
):
sess = create_session(weak_identity_map=False)
def prune():
with testing.expect_deprecated(".*Session.prune"):
return sess.prune()
return sess, prune
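    # This event-based fixture appears to emulate the deprecated strong
    # identity map: state-transition listeners keep hard references in
    # session.info["refs"], and prune() clears them and re-collects, so the
    # same assertions can run against either fixture.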
def _event_fixture(self):
session = create_session()
@event.listens_for(session, "pending_to_persistent")
@event.listens_for(session, "deleted_to_persistent")
@event.listens_for(session, "detached_to_persistent")
@event.listens_for(session, "loaded_as_persistent")
def strong_ref_object(sess, instance):
if "refs" not in sess.info:
sess.info["refs"] = refs = set()
else:
refs = sess.info["refs"]
refs.add(instance)
@event.listens_for(session, "persistent_to_detached")
@event.listens_for(session, "persistent_to_deleted")
@event.listens_for(session, "persistent_to_transient")
def deref_object(sess, instance):
sess.info["refs"].discard(instance)
def prune():
if "refs" not in session.info:
return 0
sess_size = len(session.identity_map)
session.info["refs"].clear()
gc_collect()
session.info["refs"] = set(
s.obj() for s in session.identity_map.all_states()
)
return sess_size - len(session.identity_map)
return session, prune
def test_strong_ref_imap(self):
self._test_strong_ref(self._strong_ident_fixture)
def test_strong_ref_events(self):
self._test_strong_ref(self._event_fixture)
def _test_strong_ref(self, fixture):
s, prune = fixture()
users, User = self.tables.users, self.classes.User
mapper(User, users)
# save user
s.add(User(name="u1"))
s.flush()
user = s.query(User).one()
user = None
print(s.identity_map)
gc_collect()
assert len(s.identity_map) == 1
user = s.query(User).one()
assert not s.identity_map._modified
user.name = "u2"
assert s.identity_map._modified
s.flush()
eq_(users.select().execute().fetchall(), [(user.id, "u2")])
def test_prune_imap(self):
self._test_prune(self._strong_ident_fixture)
def test_prune_events(self):
self._test_prune(self._event_fixture)
@testing.fails_if(lambda: pypy, "pypy has a real GC")
@testing.fails_on("+zxjdbc", "http://www.sqlalchemy.org/trac/ticket/1473")
def _test_prune(self, fixture):
s, prune = fixture()
users, User = self.tables.users, self.classes.User
mapper(User, users)
for o in [User(name="u%s" % x) for x in range(10)]:
s.add(o)
# o is still live after this loop...
self.assert_(len(s.identity_map) == 0)
eq_(prune(), 0)
s.flush()
gc_collect()
eq_(prune(), 9)
# o is still in local scope here, so still present
self.assert_(len(s.identity_map) == 1)
id_ = o.id
del o
eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
u = s.query(User).get(id_)
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
u.name = "squiznart"
del u
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
s.flush()
eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
s.add(User(name="x"))
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 0)
s.flush()
self.assert_(len(s.identity_map) == 1)
eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
u = s.query(User).get(id_)
s.delete(u)
del u
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
s.flush()
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 0)
class DeprecatedQueryTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
@classmethod
def _expect_implicit_subquery(cls):
return assertions.expect_deprecated(
"Implicit coercion of SELECT and textual SELECT constructs into "
r"FROM clauses is deprecated; please call \.subquery\(\) on any "
"Core select or ORM Query object in order to produce a "
"subquery object."
)
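    # The helper above matches the warning emitted when a raw SELECT or
    # textual SELECT is passed where a FROM clause is expected; the suggested
    # migration is to call .subquery() on the Core select or ORM Query first.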
def test_via_textasfrom_select_from(self):
User = self.classes.User
s = create_session()
with self._expect_implicit_subquery():
eq_(
s.query(User)
.select_from(
text("select * from users").columns(
id=Integer, name=String
)
)
.order_by(User.id)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_query_as_scalar(self):
User = self.classes.User
s = Session()
with assertions.expect_deprecated(
r"The Query.as_scalar\(\) method is deprecated and will "
"be removed in a future release."
):
s.query(User).as_scalar()
def test_select_entity_from_crit(self):
User, users = self.classes.User, self.tables.users
sel = users.select()
sess = create_session()
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.select_entity_from(sel)
.filter(User.id.in_([7, 8]))
.all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_select_entity_from_select(self):
User, users = self.classes.User, self.tables.users
sess = create_session()
with self._expect_implicit_subquery():
self.assert_compile(
sess.query(User.name).select_entity_from(
users.select().where(users.c.id > 5)
),
"SELECT anon_1.name AS anon_1_name FROM "
"(SELECT users.id AS id, users.name AS name FROM users "
"WHERE users.id > :id_1) AS anon_1",
)
def test_select_entity_from_q_statement(self):
User = self.classes.User
sess = create_session()
q = sess.query(User)
with self._expect_implicit_subquery():
q = sess.query(User).select_entity_from(q.statement)
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name "
"FROM (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE anon_1.name = :name_1",
)
def test_select_from_q_statement_no_aliasing(self):
User = self.classes.User
sess = create_session()
q = sess.query(User)
with self._expect_implicit_subquery():
q = sess.query(User).select_from(q.statement)
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE users.name = :name_1",
)
def test_from_alias_three(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select(
use_labels=True, order_by=[text("ulist.id"), addresses.c.id]
)
)
sess = create_session()
# better way. use select_entity_from()
def go():
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(query)
.options(contains_eager("addresses"))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_four(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
sess = create_session()
# same thing, but alias addresses, so that the adapter
# generated by select_entity_from() is wrapped within
# the adapter created by contains_eager()
adalias = addresses.alias()
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(adalias)
.select(use_labels=True, order_by=[text("ulist.id"), adalias.c.id])
)
def go():
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(query)
.options(contains_eager("addresses", alias=adalias))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_select(self):
users = self.tables.users
sess = create_session()
with self._expect_implicit_subquery():
self.assert_compile(
sess.query(users)
.select_entity_from(users.select())
.with_labels()
.statement,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, "
"(SELECT users.id AS id, users.name AS name FROM users) "
"AS anon_1",
)
def test_join(self):
users, Address, User = (
self.tables.users,
self.classes.Address,
self.classes.User,
)
# mapper(User, users, properties={"addresses": relationship(Address)})
# mapper(Address, addresses)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(sel)
.join("addresses")
.add_entity(Address)
.order_by(User.id)
.order_by(Address.id)
.all()
)
eq_(
result,
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="jack@bean.com", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@wood.com", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@bettyboop.com", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@lala.com", id=4),
),
],
)
adalias = aliased(Address)
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(sel)
.join(adalias, "addresses")
.add_entity(adalias)
.order_by(User.id)
.order_by(adalias.id)
.all()
)
eq_(
result,
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="jack@bean.com", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@wood.com", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@bettyboop.com", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="ed@lala.com", id=4),
),
],
)
def test_more_joins(self):
(users, Keyword, User) = (
self.tables.users,
self.classes.Keyword,
self.classes.User,
)
sess = create_session()
sel = users.select(users.c.id.in_([7, 8]))
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.select_entity_from(sel)
.join("orders", "items", "keywords")
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[User(name="jack", id=7)],
)
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.select_entity_from(sel)
.join("orders", "items", "keywords", aliased=True)
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[User(name="jack", id=7)],
)
def test_join_no_order_by(self):
User, users = self.classes.User, self.tables.users
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
with self._expect_implicit_subquery():
eq_(
sess.query(User).select_entity_from(sel).all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_replace_with_eager(self):
users, Address, User = (
self.tables.users,
self.classes.Address,
self.classes.User,
)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
def go():
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel)
.order_by(User.id)
.all(),
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel)
.filter(User.id == 8)
.order_by(User.id)
.all(),
[
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel)
.order_by(User.id)[1],
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
),
)
self.assert_sql_count(testing.db, go, 1)
def test_onclause_conditional_adaption(self):
Item, Order, orders, order_items, User = (
self.classes.Item,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.User,
)
sess = Session()
oalias = orders.select()
with self._expect_implicit_subquery():
self.assert_compile(
sess.query(User)
.join(oalias, User.orders)
.join(
Item,
and_(
Order.id == order_items.c.order_id,
order_items.c.item_id == Item.id,
),
from_joinpoint=True,
),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN "
"(SELECT orders.id AS id, orders.user_id AS user_id, "
"orders.address_id AS address_id, orders.description "
"AS description, orders.isopen AS isopen FROM orders) "
"AS anon_1 ON users.id = anon_1.user_id JOIN items "
"ON anon_1.id = order_items.order_id "
"AND order_items.item_id = items.id",
use_default_dialect=True,
)
class DeprecatedInhTest(_poly_fixtures._Polymorphic):
def test_with_polymorphic(self):
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
with DeprecatedQueryTest._expect_implicit_subquery():
p_poly = with_polymorphic(Person, [Engineer], select([Person]))
is_true(
sa.inspect(p_poly).selectable.compare(select([Person]).subquery())
)
def test_multiple_adaption(self):
Company = _poly_fixtures.Company
Machine = _poly_fixtures.Machine
Engineer = _poly_fixtures.Engineer
people = self.tables.people
engineers = self.tables.engineers
machines = self.tables.machines
sess = create_session()
mach_alias = machines.select()
with DeprecatedQueryTest._expect_implicit_subquery():
self.assert_compile(
sess.query(Company)
.join(people.join(engineers), Company.employees)
.join(mach_alias, Engineer.machines, from_joinpoint=True)
.filter(Engineer.name == "dilbert")
.filter(Machine.name == "foo"),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN (people "
"JOIN engineers ON people.person_id = "
"engineers.person_id) ON companies.company_id = "
"people.company_id JOIN "
"(SELECT machines.machine_id AS machine_id, "
"machines.name AS name, "
"machines.engineer_id AS engineer_id "
"FROM machines) AS anon_1 "
"ON engineers.person_id = anon_1.engineer_id "
"WHERE people.name = :name_1 AND anon_1.name = :name_2",
use_default_dialect=True,
)
class DeprecatedMapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_polymorphic_union_w_select(self):
users, addresses = self.tables.users, self.tables.addresses
with DeprecatedQueryTest._expect_implicit_subquery():
dep = polymorphic_union(
{"u": users.select(), "a": addresses.select()},
"type",
"bcjoin",
)
subq_version = polymorphic_union(
{
"u": users.select().subquery(),
"a": addresses.select().subquery(),
},
"type",
"bcjoin",
)
is_true(dep.compare(subq_version))
def test_cancel_order_by(self):
users, User = self.tables.users, self.classes.User
with testing.expect_deprecated(
"The Mapper.order_by parameter is deprecated, and will be "
"removed in a future release."
):
mapper(User, users, order_by=users.c.name.desc())
assert (
"order by users.name desc"
in str(create_session().query(User).statement).lower()
)
assert (
"order by"
not in str(
create_session().query(User).order_by(None).statement
).lower()
)
assert (
"order by users.name asc"
in str(
create_session()
.query(User)
.order_by(User.name.asc())
.statement
).lower()
)
eq_(
create_session().query(User).all(),
[
User(id=7, name="jack"),
User(id=9, name="fred"),
User(id=8, name="ed"),
User(id=10, name="chuck"),
],
)
eq_(
create_session().query(User).order_by(User.name).all(),
[
User(id=10, name="chuck"),
User(id=8, name="ed"),
User(id=9, name="fred"),
User(id=7, name="jack"),
],
)
def test_comparable(self):
users = self.tables.users
class extendedproperty(property):
attribute = 123
def method1(self):
return "method1"
from sqlalchemy.orm.properties import ColumnProperty
class UCComparator(ColumnProperty.Comparator):
__hash__ = None
def method1(self):
return "uccmethod1"
def method2(self, other):
return "method2"
def __eq__(self, other):
cls = self.prop.parent.class_
col = getattr(cls, "name")
if other is None:
return col is None
else:
return sa.func.upper(col) == sa.func.upper(other)
def map_(with_explicit_property):
class User(object):
@extendedproperty
def uc_name(self):
if self.name is None:
return None
return self.name.upper()
if with_explicit_property:
args = (UCComparator, User.uc_name)
else:
args = (UCComparator,)
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
mapper(
User,
users,
properties=dict(uc_name=sa.orm.comparable_property(*args)),
)
return User
for User in (map_(True), map_(False)):
sess = create_session()
sess.begin()
q = sess.query(User)
assert hasattr(User, "name")
assert hasattr(User, "uc_name")
eq_(User.uc_name.method1(), "method1")
eq_(User.uc_name.method2("x"), "method2")
assert_raises_message(
AttributeError,
"Neither 'extendedproperty' object nor 'UCComparator' "
"object associated with User.uc_name has an attribute "
"'nonexistent'",
getattr,
User.uc_name,
"nonexistent",
)
# test compile
assert not isinstance(User.uc_name == "jack", bool)
u = q.filter(User.uc_name == "JACK").one()
assert u.uc_name == "JACK"
assert u not in sess.dirty
u.name = "some user name"
eq_(u.name, "some user name")
assert u in sess.dirty
eq_(u.uc_name, "SOME USER NAME")
sess.flush()
sess.expunge_all()
q = sess.query(User)
u2 = q.filter(User.name == "some user name").one()
u3 = q.filter(User.uc_name == "SOME USER NAME").one()
assert u2 is u3
eq_(User.uc_name.attribute, 123)
sess.rollback()
def test_comparable_column(self):
users, User = self.tables.users, self.classes.User
class MyComparator(sa.orm.properties.ColumnProperty.Comparator):
__hash__ = None
def __eq__(self, other):
# lower case comparison
return func.lower(self.__clause_element__()) == func.lower(
other
)
def intersects(self, other):
# non-standard comparator
return self.__clause_element__().op("&=")(other)
mapper(
User,
users,
properties={
"name": sa.orm.column_property(
users.c.name, comparator_factory=MyComparator
)
},
)
assert_raises_message(
AttributeError,
"Neither 'InstrumentedAttribute' object nor "
"'MyComparator' object associated with User.name has "
"an attribute 'nonexistent'",
getattr,
User.name,
"nonexistent",
)
eq_(
str(
(User.name == "ed").compile(
dialect=sa.engine.default.DefaultDialect()
)
),
"lower(users.name) = lower(:lower_1)",
)
eq_(
str(
(User.name.intersects("ed")).compile(
dialect=sa.engine.default.DefaultDialect()
)
),
"users.name &= :name_1",
)
def test_info(self):
class MyComposite(object):
pass
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
for constructor, args in [(comparable_property, "foo")]:
obj = constructor(info={"x": "y"}, *args)
eq_(obj.info, {"x": "y"})
obj.info["q"] = "p"
eq_(obj.info, {"x": "y", "q": "p"})
obj = constructor(*args)
eq_(obj.info, {})
obj.info["q"] = "p"
eq_(obj.info, {"q": "p"})
def test_add_property(self):
users = self.tables.users
assert_col = []
class User(fixtures.ComparableEntity):
def _get_name(self):
assert_col.append(("get", self._name))
return self._name
def _set_name(self, name):
assert_col.append(("set", name))
self._name = name
name = property(_get_name, _set_name)
def _uc_name(self):
if self._name is None:
return None
return self._name.upper()
uc_name = property(_uc_name)
uc_name2 = property(_uc_name)
m = mapper(User, users)
class UCComparator(PropComparator):
__hash__ = None
def __eq__(self, other):
cls = self.prop.parent.class_
col = getattr(cls, "name")
if other is None:
return col is None
else:
return func.upper(col) == func.upper(other)
m.add_property("_name", deferred(users.c.name))
m.add_property("name", synonym("_name"))
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
m.add_property("uc_name", comparable_property(UCComparator))
m.add_property(
"uc_name2", comparable_property(UCComparator, User.uc_name2)
)
sess = create_session(autocommit=False)
assert sess.query(User).get(7)
u = sess.query(User).filter_by(name="jack").one()
def go():
eq_(u.name, "jack")
eq_(u.uc_name, "JACK")
eq_(u.uc_name2, "JACK")
eq_(assert_col, [("get", "jack")], str(assert_col))
self.sql_count_(1, go)
def test_kwarg_accepted(self):
class DummyComposite(object):
def __init__(self, x, y):
pass
class MyFactory(PropComparator):
pass
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
for args in ((comparable_property,),):
fn = args[0]
args = args[1:]
fn(comparator_factory=MyFactory, *args)
def test_merge_synonym_comparable(self):
users = self.tables.users
class User(object):
class Comparator(PropComparator):
pass
def _getValue(self):
return self._value
def _setValue(self, value):
setattr(self, "_value", value)
value = property(_getValue, _setValue)
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
mapper(
User,
users,
properties={
"uid": synonym("id"),
"foobar": comparable_property(User.Comparator, User.value),
},
)
sess = create_session()
u = User()
u.name = "ed"
sess.add(u)
sess.flush()
sess.expunge(u)
sess.merge(u)
class DeprecatedDeclTest(fixtures.TestBase):
@testing.provide_metadata
def test_comparable_using(self):
class NameComparator(sa.orm.PropComparator):
@property
def upperself(self):
cls = self.prop.parent.class_
col = getattr(cls, "name")
return sa.func.upper(col)
def operate(self, op, other, **kw):
return op(self.upperself, other, **kw)
Base = declarative_base(metadata=self.metadata)
with testing.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
)
name = Column("name", String(50))
@comparable_using(NameComparator)
@property
def uc_name(self):
return self.name is not None and self.name.upper() or None
Base.metadata.create_all()
sess = create_session()
u1 = User(name="someuser")
eq_(u1.name, "someuser", u1.name)
eq_(u1.uc_name, "SOMEUSER", u1.uc_name)
sess.add(u1)
sess.flush()
sess.expunge_all()
rt = sess.query(User).filter(User.uc_name == "SOMEUSER").one()
eq_(rt, u1)
sess.expunge_all()
rt = sess.query(User).filter(User.uc_name.startswith("SOMEUSE")).one()
eq_(rt, u1)
class DeprecatedMapperExtensionTest(_fixtures.FixtureTest):
run_inserts = None
def extension(self):
methods = []
class Ext(MapperExtension):
def instrument_class(self, mapper, cls):
methods.append("instrument_class")
return EXT_CONTINUE
def init_instance(
self, mapper, class_, oldinit, instance, args, kwargs
):
methods.append("init_instance")
return EXT_CONTINUE
def init_failed(
self, mapper, class_, oldinit, instance, args, kwargs
):
methods.append("init_failed")
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
methods.append("reconstruct_instance")
return EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
methods.append("before_insert")
return EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
methods.append("after_insert")
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
methods.append("before_update")
return EXT_CONTINUE
def after_update(self, mapper, connection, instance):
methods.append("after_update")
return EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
methods.append("before_delete")
return EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
methods.append("after_delete")
return EXT_CONTINUE
return Ext, methods
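    # The fixture above records which MapperExtension hooks fire so the tests
    # below can check the deprecated extension callbacks against the expected
    # mapper lifecycle order.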
def test_basic(self):
User, users = self.classes.User, self.tables.users
Ext, methods = self.extension()
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(User, users, extension=Ext())
sess = create_session()
u = User(name="u1")
sess.add(u)
sess.flush()
u = sess.query(User).populate_existing().get(u.id)
sess.expunge_all()
u = sess.query(User).get(u.id)
u.name = "u1 changed"
sess.flush()
sess.delete(u)
sess.flush()
eq_(
methods,
[
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
"reconstruct_instance",
"before_update",
"after_update",
"before_delete",
"after_delete",
],
)
def test_inheritance(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
Ext, methods = self.extension()
class AdminUser(User):
pass
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(User, users, extension=Ext())
mapper(
AdminUser,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
sess = create_session()
am = AdminUser(name="au1", email_address="au1@e1")
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = "au1 changed"
sess.flush()
sess.delete(am)
sess.flush()
eq_(
methods,
[
"instrument_class",
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
"reconstruct_instance",
"before_update",
"after_update",
"before_delete",
"after_delete",
],
)
def test_before_after_only_collection(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
Ext1, methods1 = self.extension()
Ext2, methods2 = self.extension()
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(
Item,
items,
extension=Ext1(),
properties={
"keywords": relationship(Keyword, secondary=item_keywords)
},
)
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(Keyword, keywords, extension=Ext2())
sess = create_session()
i1 = Item(description="i1")
k1 = Keyword(name="k1")
sess.add(i1)
sess.add(k1)
sess.flush()
eq_(
methods1,
[
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
],
)
eq_(
methods2,
[
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
],
)
del methods1[:]
del methods2[:]
i1.keywords.append(k1)
sess.flush()
eq_(methods1, ["before_update", "after_update"])
eq_(methods2, [])
def test_inheritance_with_dupes(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
Ext, methods = self.extension()
class AdminUser(User):
pass
ext = Ext()
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(User, users, extension=ext)
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents"
):
mapper(
AdminUser,
addresses,
inherits=User,
extension=ext,
properties={"address_id": addresses.c.id},
)
sess = create_session()
am = AdminUser(name="au1", email_address="au1@e1")
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = "au1 changed"
sess.flush()
sess.delete(am)
sess.flush()
eq_(
methods,
[
"instrument_class",
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
"reconstruct_instance",
"before_update",
"after_update",
"before_delete",
"after_delete",
],
)
def test_unnecessary_methods_not_evented(self):
users = self.tables.users
class MyExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
pass
class Foo(object):
pass
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
):
m = mapper(Foo, users, extension=MyExtension())
assert not m.class_manager.dispatch.load
assert not m.dispatch.before_update
assert len(m.dispatch.before_insert) == 1
class DeprecatedSessionExtensionTest(_fixtures.FixtureTest):
run_inserts = None
def test_extension(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
log = []
class MyExt(SessionExtension):
def before_commit(self, session):
log.append("before_commit")
def after_commit(self, session):
log.append("after_commit")
def after_rollback(self, session):
log.append("after_rollback")
def before_flush(self, session, flush_context, objects):
log.append("before_flush")
def after_flush(self, session, flush_context):
log.append("after_flush")
def after_flush_postexec(self, session, flush_context):
log.append("after_flush_postexec")
def after_begin(self, session, transaction, connection):
log.append("after_begin")
def after_attach(self, session, instance):
log.append("after_attach")
def after_bulk_update(self, session, query, query_context, result):
log.append("after_bulk_update")
def after_bulk_delete(self, session, query, query_context, result):
log.append("after_bulk_delete")
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
"SessionExtension.after_commit is deprecated",
"SessionExtension.after_begin is deprecated",
"SessionExtension.after_attach is deprecated",
"SessionExtension.before_flush is deprecated",
"SessionExtension.after_flush is deprecated",
"SessionExtension.after_flush_postexec is deprecated",
"SessionExtension.after_rollback is deprecated",
"SessionExtension.after_bulk_update is deprecated",
"SessionExtension.after_bulk_delete is deprecated",
):
sess = create_session(extension=MyExt())
u = User(name="u1")
sess.add(u)
sess.flush()
assert log == [
"after_attach",
"before_flush",
"after_begin",
"after_flush",
"after_flush_postexec",
"before_commit",
"after_commit",
]
log = []
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
"SessionExtension.after_commit is deprecated",
"SessionExtension.after_begin is deprecated",
"SessionExtension.after_attach is deprecated",
"SessionExtension.before_flush is deprecated",
"SessionExtension.after_flush is deprecated",
"SessionExtension.after_flush_postexec is deprecated",
"SessionExtension.after_rollback is deprecated",
"SessionExtension.after_bulk_update is deprecated",
"SessionExtension.after_bulk_delete is deprecated",
):
sess = create_session(autocommit=False, extension=MyExt())
u = User(name="u1")
sess.add(u)
sess.flush()
assert log == [
"after_attach",
"before_flush",
"after_begin",
"after_flush",
"after_flush_postexec",
]
log = []
u.name = "ed"
sess.commit()
assert log == [
"before_commit",
"before_flush",
"after_flush",
"after_flush_postexec",
"after_commit",
]
log = []
sess.commit()
assert log == ["before_commit", "after_commit"]
log = []
sess.query(User).delete()
assert log == ["after_begin", "after_bulk_delete"]
log = []
sess.query(User).update({"name": "foo"})
assert log == ["after_bulk_update"]
log = []
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
"SessionExtension.after_commit is deprecated",
"SessionExtension.after_begin is deprecated",
"SessionExtension.after_attach is deprecated",
"SessionExtension.before_flush is deprecated",
"SessionExtension.after_flush is deprecated",
"SessionExtension.after_flush_postexec is deprecated",
"SessionExtension.after_rollback is deprecated",
"SessionExtension.after_bulk_update is deprecated",
"SessionExtension.after_bulk_delete is deprecated",
):
sess = create_session(
autocommit=False, extension=MyExt(), bind=testing.db
)
sess.connection()
assert log == ["after_begin"]
sess.close()
def test_multiple_extensions(self):
User, users = self.classes.User, self.tables.users
log = []
class MyExt1(SessionExtension):
def before_commit(self, session):
log.append("before_commit_one")
class MyExt2(SessionExtension):
def before_commit(self, session):
log.append("before_commit_two")
mapper(User, users)
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
):
sess = create_session(extension=[MyExt1(), MyExt2()])
u = User(name="u1")
sess.add(u)
sess.flush()
assert log == ["before_commit_one", "before_commit_two"]
def test_unnecessary_methods_not_evented(self):
class MyExtension(SessionExtension):
def before_commit(self, session):
pass
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated.",
):
s = Session(extension=MyExtension())
assert not s.dispatch.after_commit
assert len(s.dispatch.before_commit) == 1
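# DeprecatedAttributeExtensionTest1 covers the deprecated AttributeExtension
# append/remove/set hooks, both registered directly via
# attributes.register_attribute(extension=...) and through
# column_property(extension=...).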
class DeprecatedAttributeExtensionTest1(fixtures.ORMTest):
def test_extension_commit_attr(self):
class Foo(fixtures.BasicEntity):
pass
class Bar(fixtures.BasicEntity):
pass
class ReceiveEvents(AttributeExtension):
def __init__(self, key):
self.key = key
def append(self, state, child, initiator):
if commit:
state._commit_all(state.dict)
return child
def remove(self, state, child, initiator):
if commit:
state._commit_all(state.dict)
return child
def set(self, state, child, oldchild, initiator):
if commit:
state._commit_all(state.dict)
return child
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
b1, b2, b3, b4 = Bar(id="b1"), Bar(id="b2"), Bar(id="b3"), Bar(id="b4")
def loadcollection(state, passive):
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
return [b1, b2]
def loadscalar(state, passive):
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
return b2
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"bars",
uselist=True,
useobject=True,
callable_=loadcollection,
extension=[ReceiveEvents("bars")],
)
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"bar",
uselist=False,
useobject=True,
callable_=loadscalar,
extension=[ReceiveEvents("bar")],
)
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"scalar",
uselist=False,
useobject=False,
extension=[ReceiveEvents("scalar")],
)
def create_hist():
def hist(key, fn, *arg):
attributes.instance_state(f1)._commit_all(
attributes.instance_dict(f1)
)
fn(*arg)
histories.append(attributes.get_history(f1, key))
f1 = Foo()
hist("bars", f1.bars.append, b3)
hist("bars", f1.bars.append, b4)
hist("bars", f1.bars.remove, b2)
hist("bar", setattr, f1, "bar", b3)
hist("bar", setattr, f1, "bar", None)
hist("bar", setattr, f1, "bar", b4)
hist("scalar", setattr, f1, "scalar", 5)
hist("scalar", setattr, f1, "scalar", None)
hist("scalar", setattr, f1, "scalar", 4)
histories = []
commit = False
create_hist()
without_commit = list(histories)
histories[:] = []
commit = True
create_hist()
with_commit = histories
for without, with_ in zip(without_commit, with_commit):
woc = without
wic = with_
eq_(woc, wic)
def test_extension_lazyload_assertion(self):
class Foo(fixtures.BasicEntity):
pass
class Bar(fixtures.BasicEntity):
pass
class ReceiveEvents(AttributeExtension):
def append(self, state, child, initiator):
state.obj().bars
return child
def remove(self, state, child, initiator):
state.obj().bars
return child
def set(self, state, child, oldchild, initiator):
return child
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
bar1, bar2, bar3 = [Bar(id=1), Bar(id=2), Bar(id=3)]
def func1(state, passive):
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
return [bar1, bar2, bar3]
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"bars",
uselist=True,
callable_=func1,
useobject=True,
extension=[ReceiveEvents()],
)
attributes.register_attribute(
Bar, "foos", uselist=True, useobject=True, backref="bars"
)
x = Foo()
assert_raises(AssertionError, Bar(id=4).foos.append, x)
x.bars
b = Bar(id=4)
b.foos.append(x)
attributes.instance_state(x)._expire_attributes(
attributes.instance_dict(x), ["bars"]
)
assert_raises(AssertionError, b.foos.remove, x)
def test_scalar_listener(self):
        # listeners on ScalarAttributeImpl aren't used normally; test that
        # they still work for the benefit of user extensions.
class Foo(object):
pass
results = []
class ReceiveEvents(AttributeExtension):
def append(self, state, child, initiator):
assert False
def remove(self, state, child, initiator):
results.append(("remove", state.obj(), child))
def set(self, state, child, oldchild, initiator):
results.append(("set", state.obj(), child, oldchild))
return child
instrumentation.register_class(Foo)
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"x",
uselist=False,
useobject=False,
extension=ReceiveEvents(),
)
f = Foo()
f.x = 5
f.x = 17
del f.x
eq_(
results,
[
("set", f, 5, attributes.NEVER_SET),
("set", f, 17, 5),
("remove", f, 17),
],
)
def test_cascading_extensions(self):
t1 = Table(
"t1",
MetaData(),
Column("id", Integer, primary_key=True),
Column("type", String(40)),
Column("data", String(50)),
)
ext_msg = []
class Ex1(AttributeExtension):
def set(self, state, value, oldvalue, initiator):
ext_msg.append("Ex1 %r" % value)
return "ex1" + value
class Ex2(AttributeExtension):
def set(self, state, value, oldvalue, initiator):
ext_msg.append("Ex2 %r" % value)
return "ex2" + value
class A(fixtures.BasicEntity):
pass
class B(A):
pass
class C(B):
pass
with testing.expect_deprecated(
"AttributeExtension is deprecated in favor of the "
"AttributeEvents listener interface. "
"The column_property.extension parameter"
):
mapper(
A,
t1,
polymorphic_on=t1.c.type,
polymorphic_identity="a",
properties={
"data": column_property(t1.c.data, extension=Ex1())
},
)
mapper(B, polymorphic_identity="b", inherits=A)
with testing.expect_deprecated(
"AttributeExtension is deprecated in favor of the "
"AttributeEvents listener interface. "
"The column_property.extension parameter"
):
mapper(
C,
polymorphic_identity="c",
inherits=B,
properties={
"data": column_property(t1.c.data, extension=Ex2())
},
)
with testing.expect_deprecated(
"AttributeExtension.set is deprecated. "
):
configure_mappers()
a1 = A(data="a1")
b1 = B(data="b1")
c1 = C(data="c1")
eq_(a1.data, "ex1a1")
eq_(b1.data, "ex1b1")
eq_(c1.data, "ex2c1")
a1.data = "a2"
b1.data = "b2"
c1.data = "c2"
eq_(a1.data, "ex1a2")
eq_(b1.data, "ex1b2")
eq_(c1.data, "ex2c2")
eq_(
ext_msg,
[
"Ex1 'a1'",
"Ex1 'b1'",
"Ex2 'c1'",
"Ex1 'a2'",
"Ex1 'b2'",
"Ex2 'c2'",
],
)
class DeprecatedOptionAllTest(OptionsPathTest, _fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
def _mapper_fixture_one(self):
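        # Set up the standard User/Address/Order/Item/Keyword mappers and
        # relationships exercised by the deprecated-option tests below.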
users, User, addresses, Address, orders, Order = (
self.tables.users,
self.classes.User,
self.tables.addresses,
self.classes.Address,
self.tables.orders,
self.classes.Order,
)
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
mapper(
User,
users,
properties={
"addresses": relationship(Address),
"orders": relationship(Order),
},
)
mapper(Address, addresses)
mapper(
Order,
orders,
properties={
"items": relationship(Item, secondary=self.tables.order_items)
},
)
mapper(
Keyword,
keywords,
properties={
"keywords": column_property(keywords.c.name + "some keyword")
},
)
mapper(
Item,
items,
properties=dict(
keywords=relationship(Keyword, secondary=item_keywords)
),
)
def _assert_eager_with_entity_exception(
self, entity_list, options, message
):
assert_raises_message(
sa.exc.ArgumentError,
message,
create_session().query(*entity_list).options,
*options
)
def test_option_against_nonexistent_twolevel_all(self):
self._mapper_fixture_one()
Item = self.classes.Item
with testing.expect_deprecated(
r"The joinedload_all\(\) function is deprecated, and "
"will be removed in a future release. "
r"Please use method chaining with joinedload\(\)"
):
self._assert_eager_with_entity_exception(
[Item],
(joinedload_all("keywords.foo"),),
'Can\'t find property named \\"foo\\" on mapped class '
"Keyword->keywords in this Query.",
)
def test_all_path_vs_chained(self):
self._mapper_fixture_one()
User = self.classes.User
Order = self.classes.Order
Item = self.classes.Item
with testing.expect_deprecated(
r"The joinedload_all\(\) function is deprecated, and "
"will be removed in a future release. "
r"Please use method chaining with joinedload\(\)"
):
l1 = joinedload_all("orders.items.keywords")
sess = Session()
q = sess.query(User)
self._assert_path_result(
l1,
q,
[
(User, "orders"),
(User, "orders", Order, "items"),
(User, "orders", Order, "items", Item, "keywords"),
],
)
l2 = joinedload("orders").joinedload("items").joinedload("keywords")
self._assert_path_result(
l2,
q,
[
(User, "orders"),
(User, "orders", Order, "items"),
(User, "orders", Order, "items", Item, "keywords"),
],
)
def test_subqueryload_mapper_order_by(self):
users, User, Address, addresses = (
self.tables.users,
self.classes.User,
self.classes.Address,
self.tables.addresses,
)
mapper(Address, addresses)
with testing.expect_deprecated(
".*Mapper.order_by parameter is deprecated"
):
mapper(
User,
users,
properties={
"addresses": relationship(
Address, lazy="subquery", order_by=addresses.c.id
)
},
order_by=users.c.id.desc(),
)
sess = create_session()
q = sess.query(User)
result = q.limit(2).all()
eq_(result, list(reversed(self.static.user_address_result[2:4])))
def test_selectinload_mapper_order_by(self):
users, User, Address, addresses = (
self.tables.users,
self.classes.User,
self.classes.Address,
self.tables.addresses,
)
mapper(Address, addresses)
with testing.expect_deprecated(
".*Mapper.order_by parameter is deprecated"
):
mapper(
User,
users,
properties={
"addresses": relationship(
Address, lazy="selectin", order_by=addresses.c.id
)
},
order_by=users.c.id.desc(),
)
sess = create_session()
q = sess.query(User)
result = q.limit(2).all()
eq_(result, list(reversed(self.static.user_address_result[2:4])))
def test_join_mapper_order_by(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
".*Mapper.order_by parameter is deprecated"
):
mapper(User, users, order_by=users.c.id)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
with DeprecatedQueryTest._expect_implicit_subquery():
eq_(
sess.query(User).select_entity_from(sel).all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_defer_addtl_attrs(self):
users, User, Address, addresses = (
self.tables.users,
self.classes.User,
self.classes.Address,
self.tables.addresses,
)
mapper(Address, addresses)
mapper(
User,
users,
properties={
"addresses": relationship(
Address, lazy="selectin", order_by=addresses.c.id
)
},
)
sess = create_session()
with testing.expect_deprecated(
r"The \*addl_attrs on orm.defer is deprecated. "
"Please use method chaining"
):
sess.query(User).options(defer("addresses", "email_address"))
with testing.expect_deprecated(
r"The \*addl_attrs on orm.undefer is deprecated. "
"Please use method chaining"
):
sess.query(User).options(undefer("addresses", "email_address"))
class LegacyLockModeTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
mapper(User, users)
def _assert_legacy(self, arg, read=False, nowait=False):
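        # Call Query.with_lockmode(arg) under the deprecation warning and
        # verify that both the Query and its compiled statement carry the
        # expected FOR UPDATE read/nowait flags (or none when arg is None).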
User = self.classes.User
s = Session()
with testing.expect_deprecated(
r"The Query.with_lockmode\(\) method is deprecated"
):
q = s.query(User).with_lockmode(arg)
sel = q._compile_context().statement
if arg is None:
assert q._for_update_arg is None
assert sel._for_update_arg is None
return
assert q._for_update_arg.read is read
assert q._for_update_arg.nowait is nowait
assert sel._for_update_arg.read is read
assert sel._for_update_arg.nowait is nowait
def test_false_legacy(self):
self._assert_legacy(None)
def test_plain_legacy(self):
self._assert_legacy("update")
def test_nowait_legacy(self):
self._assert_legacy("update_nowait", nowait=True)
def test_read_legacy(self):
self._assert_legacy("read", read=True)
def test_unknown_legacy_lock_mode(self):
User = self.classes.User
sess = Session()
with testing.expect_deprecated(
r"The Query.with_lockmode\(\) method is deprecated"
):
assert_raises_message(
exc.ArgumentError,
"Unknown with_lockmode argument: 'unknown_mode'",
sess.query(User.id).with_lockmode,
"unknown_mode",
)
class InstrumentationTest(fixtures.ORMTest):
def test_dict_subclass4(self):
# tests #2654
with testing.expect_deprecated(
r"The collection.converter\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class MyDict(collections.MappedCollection):
def __init__(self):
super(MyDict, self).__init__(lambda value: "k%d" % value)
@collection.converter
def _convert(self, dictlike):
for key, value in dictlike.items():
yield value + 5
class Foo(object):
pass
instrumentation.register_class(Foo)
attributes.register_attribute(
Foo, "attr", uselist=True, typecallable=MyDict, useobject=True
)
f = Foo()
f.attr = {"k1": 1, "k2": 2}
eq_(f.attr, {"k7": 7, "k6": 6})
def test_name_setup(self):
with testing.expect_deprecated(
r"The collection.converter\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class Base(object):
@collection.iterator
def base_iterate(self, x):
return "base_iterate"
@collection.appender
def base_append(self, x):
return "base_append"
@collection.converter
def base_convert(self, x):
return "base_convert"
@collection.remover
def base_remove(self, x):
return "base_remove"
from sqlalchemy.orm.collections import _instrument_class
_instrument_class(Base)
eq_(Base._sa_remover(Base(), 5), "base_remove")
eq_(Base._sa_appender(Base(), 5), "base_append")
eq_(Base._sa_iterator(Base(), 5), "base_iterate")
eq_(Base._sa_converter(Base(), 5), "base_convert")
with testing.expect_deprecated(
r"The collection.converter\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class Sub(Base):
@collection.converter
def base_convert(self, x):
return "sub_convert"
@collection.remover
def sub_remove(self, x):
return "sub_remove"
_instrument_class(Sub)
eq_(Sub._sa_appender(Sub(), 5), "base_append")
eq_(Sub._sa_remover(Sub(), 5), "sub_remove")
eq_(Sub._sa_iterator(Sub(), 5), "base_iterate")
eq_(Sub._sa_converter(Sub(), 5), "sub_convert")
def test_link_event(self):
canary = []
with testing.expect_deprecated(
r"The collection.linker\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class Collection(list):
@collection.linker
def _on_link(self, obj):
canary.append(obj)
class Foo(object):
pass
instrumentation.register_class(Foo)
attributes.register_attribute(
Foo, "attr", uselist=True, typecallable=Collection, useobject=True
)
f1 = Foo()
f1.attr.append(3)
eq_(canary, [f1.attr._sa_adapter])
adapter_1 = f1.attr._sa_adapter
l2 = Collection()
f1.attr = l2
eq_(canary, [adapter_1, f1.attr._sa_adapter, None])
class NonPrimaryRelationshipLoaderTest(_fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
def test_selectload(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(Address, lazy=True),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="select",
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="select",
),
),
)
self._run_double_test(10)
def test_joinedload(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="joined",
order_by=openorders.c.id,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="joined",
order_by=closedorders.c.id,
),
),
)
self._run_double_test(1)
def test_selectin(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="selectin", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="selectin",
order_by=openorders.c.id,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="selectin",
order_by=closedorders.c.id,
),
),
)
self._run_double_test(4)
def test_subqueryload(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="subquery", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="subquery",
order_by=openorders.c.id,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="subquery",
order_by=closedorders.c.id,
),
),
)
self._run_double_test(4)
def _run_double_test(self, count):
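        # Run the eager-load query and assert it emits exactly `count` SQL
        # statements, then re-load the open/closed orders through the
        # non-primary mappers via Query.with_parent().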
User, Address, Order, Item = self.classes(
"User", "Address", "Order", "Item"
)
q = create_session().query(User).order_by(User.id)
def go():
eq_(
[
User(
id=7,
addresses=[Address(id=1)],
open_orders=[Order(id=3)],
closed_orders=[Order(id=1), Order(id=5)],
),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
open_orders=[],
closed_orders=[],
),
User(
id=9,
addresses=[Address(id=5)],
open_orders=[Order(id=4)],
closed_orders=[Order(id=2)],
),
User(id=10),
],
q.all(),
)
self.assert_sql_count(testing.db, go, count)
sess = create_session()
user = sess.query(User).get(7)
closed_mapper = User.closed_orders.entity
open_mapper = User.open_orders.entity
eq_(
[Order(id=1), Order(id=5)],
create_session()
.query(closed_mapper)
.with_parent(user, property="closed_orders")
.all(),
)
eq_(
[Order(id=3)],
create_session()
.query(open_mapper)
.with_parent(user, property="open_orders")
.all(),
)
class NonPrimaryMapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_non_primary_identity_class(self):
User = self.classes.User
users, addresses = self.tables.users, self.tables.addresses
class AddressUser(User):
pass
mapper(User, users, polymorphic_identity="user")
m2 = mapper(
AddressUser,
addresses,
inherits=User,
polymorphic_identity="address",
properties={"address_id": addresses.c.id},
)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
m3 = mapper(AddressUser, addresses, non_primary=True)
assert m3._identity_class is m2._identity_class
eq_(
m2.identity_key_from_instance(AddressUser()),
m3.identity_key_from_instance(AddressUser()),
)
def test_illegal_non_primary(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
mapper(
User,
users,
non_primary=True,
properties={"addresses": relationship(Address)},
)
assert_raises_message(
sa.exc.ArgumentError,
"Attempting to assign a new relationship 'addresses' "
"to a non-primary mapper on class 'User'",
configure_mappers,
)
def test_illegal_non_primary_2(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
assert_raises_message(
sa.exc.InvalidRequestError,
"Configure a primary mapper first",
mapper,
User,
users,
non_primary=True,
)
def test_illegal_non_primary_3(self):
users, addresses = self.tables.users, self.tables.addresses
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, users)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
assert_raises_message(
sa.exc.InvalidRequestError,
"Configure a primary mapper first",
mapper,
Sub,
addresses,
non_primary=True,
)
| true
| true
|
f70567387f4fdb0f240af2488067783b35be93ce
| 1,196
|
py
|
Python
|
tests/convert_softmax.py
|
juanCastrillo/gluon2pytorch
|
dc73055f0c74dbc45a70f21057fa161123826d86
|
[
"MIT"
] | 73
|
2018-11-01T03:07:11.000Z
|
2021-03-03T01:48:58.000Z
|
tests/convert_softmax.py
|
juanCastrillo/gluon2pytorch
|
dc73055f0c74dbc45a70f21057fa161123826d86
|
[
"MIT"
] | 5
|
2018-11-02T06:45:33.000Z
|
2019-09-24T06:54:59.000Z
|
tests/convert_softmax.py
|
juanCastrillo/gluon2pytorch
|
dc73055f0c74dbc45a70f21057fa161123826d86
|
[
"MIT"
] | 5
|
2019-01-29T00:03:24.000Z
|
2021-01-12T14:18:59.000Z
|
import torch
import mxnet as mx
import numpy as np
from gluon2pytorch import gluon2pytorch
class SoftmaxTest(mx.gluon.nn.HybridSequential):
def __init__(self):
super(SoftmaxTest, self).__init__()
from mxnet.gluon import nn
with self.name_scope():
self.conv1 = nn.Conv2D(3, 32)
def hybrid_forward(self, F, x):
x = F.softmax(self.conv1(x))
return x
def check_error(gluon_output, pytorch_output, epsilon=1e-5):
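    # Convert both outputs to NumPy arrays and require the maximum
    # elementwise difference (pytorch - gluon) to stay below epsilon.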
pytorch_output = pytorch_output.data.numpy()
gluon_output = gluon_output.asnumpy()
error = np.max(pytorch_output - gluon_output)
print('Error:', error)
assert error < epsilon
return error
if __name__ == '__main__':
print('Test softmax:')
net = SoftmaxTest()
# Make sure it's hybrid and initialized
net.hybridize()
net.collect_params().initialize()
pytorch_model = gluon2pytorch(net, [(1, 3, 224, 224)], dst_dir=None, pytorch_module_name='SoftmaxTest')
input_np = np.random.uniform(-1, 1, (1, 3, 224, 224))
gluon_output = net(mx.nd.array(input_np))
pytorch_output = pytorch_model(torch.FloatTensor(input_np))
check_error(gluon_output, pytorch_output)
| 26
| 107
| 0.68311
|
import torch
import mxnet as mx
import numpy as np
from gluon2pytorch import gluon2pytorch
class SoftmaxTest(mx.gluon.nn.HybridSequential):
def __init__(self):
super(SoftmaxTest, self).__init__()
from mxnet.gluon import nn
with self.name_scope():
self.conv1 = nn.Conv2D(3, 32)
def hybrid_forward(self, F, x):
x = F.softmax(self.conv1(x))
return x
def check_error(gluon_output, pytorch_output, epsilon=1e-5):
pytorch_output = pytorch_output.data.numpy()
gluon_output = gluon_output.asnumpy()
error = np.max(pytorch_output - gluon_output)
print('Error:', error)
assert error < epsilon
return error
if __name__ == '__main__':
print('Test softmax:')
net = SoftmaxTest()
net.hybridize()
net.collect_params().initialize()
pytorch_model = gluon2pytorch(net, [(1, 3, 224, 224)], dst_dir=None, pytorch_module_name='SoftmaxTest')
input_np = np.random.uniform(-1, 1, (1, 3, 224, 224))
gluon_output = net(mx.nd.array(input_np))
pytorch_output = pytorch_model(torch.FloatTensor(input_np))
check_error(gluon_output, pytorch_output)
| true
| true
|
f705675ec8d1348ee1124a33b5cc7917d1404582
| 2,070
|
py
|
Python
|
src.py/searchathing_unittest/core.py
|
devel0/SearchAThing.UnitTest
|
186c5fe7ad55966c8e3db96e31d3c7110b4670e4
|
[
"MIT"
] | null | null | null |
src.py/searchathing_unittest/core.py
|
devel0/SearchAThing.UnitTest
|
186c5fe7ad55966c8e3db96e31d3c7110b4670e4
|
[
"MIT"
] | null | null | null |
src.py/searchathing_unittest/core.py
|
devel0/SearchAThing.UnitTest
|
186c5fe7ad55966c8e3db96e31d3c7110b4670e4
|
[
"MIT"
] | null | null | null |
"""
* SearchAThing.UnitTest, Copyright(C) 2015-2017 Lorenzo Delana, License under MIT
*
* The MIT License(MIT)
* Copyright(c) 2015-2017 Lorenzo Delana, https://searchathing.com
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
"""
import unittest
from searchathing_core.number import *
class Core(unittest.TestCase):
def test_equals_auto_tol(self):
self.assertTrue(equals_auto_tol(1, 1))
self.assertTrue(equals_auto_tol(1, 1 + 1e-20))
self.assertFalse(equals_auto_tol(1, 2))
self.assertTrue(equals_auto_tol(1, 2, precision=2))
def test_mround(self):
self.assertTrue(equals_tol(1e-10, mround(4, 3), 3))
self.assertTrue(equals_tol(1e-10, mround(5, 3), 6))
self.assertTrue(equals_tol(1e-10, mround(-3.21, .1), -3.2))
self.assertTrue(equals_tol(1e-10, mround(-3.29, .1), -3.3))
def test_angle(self):
self.assertTrue(equals_tol(1e-6, to_deg(.21294), 12.200563))
self.assertTrue(equals_tol(1e-6, to_rad(140.3), 2.448697))
if __name__ == '__main__':
unittest.main()
| 40.588235
| 81
| 0.727536
|
import unittest
from searchathing_core.number import *
class Core(unittest.TestCase):
def test_equals_auto_tol(self):
self.assertTrue(equals_auto_tol(1, 1))
self.assertTrue(equals_auto_tol(1, 1 + 1e-20))
self.assertFalse(equals_auto_tol(1, 2))
self.assertTrue(equals_auto_tol(1, 2, precision=2))
def test_mround(self):
self.assertTrue(equals_tol(1e-10, mround(4, 3), 3))
self.assertTrue(equals_tol(1e-10, mround(5, 3), 6))
self.assertTrue(equals_tol(1e-10, mround(-3.21, .1), -3.2))
self.assertTrue(equals_tol(1e-10, mround(-3.29, .1), -3.3))
def test_angle(self):
self.assertTrue(equals_tol(1e-6, to_deg(.21294), 12.200563))
self.assertTrue(equals_tol(1e-6, to_rad(140.3), 2.448697))
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70567d0c6ea41908433590f3fbf095df00bbd11
| 90,963
|
py
|
Python
|
src/sage/combinat/cluster_algebra_quiver/quiver_mutation_type.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 3
|
2019-07-15T13:48:24.000Z
|
2019-11-08T12:31:43.000Z
|
src/sage/combinat/cluster_algebra_quiver/quiver_mutation_type.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 2
|
2018-10-30T13:40:20.000Z
|
2020-07-23T12:13:30.000Z
|
src/sage/combinat/cluster_algebra_quiver/quiver_mutation_type.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 1
|
2020-07-23T10:29:58.000Z
|
2020-07-23T10:29:58.000Z
|
r"""
Quiver mutation types
AUTHORS:
- Gregg Musiker (2012, initial version)
- Christian Stump (2012, initial version)
- Hugh Thomas (2012, initial version)
"""
#*****************************************************************************
# Copyright (C) 2011 Gregg Musiker <gmusiker@gmail.com>
# Christian Stump <christian.stump@gmail.com>
# Hugh Thomas <hugh@math.unb.ca>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
# python3
from __future__ import division, print_function
from __future__ import absolute_import
from six.moves import range
from sage.structure.sage_object import SageObject
from copy import copy
from sage.structure.unique_representation import UniqueRepresentation
from sage.misc.all import cached_method
from sage.rings.all import ZZ, infinity
from sage.graphs.all import Graph, DiGraph
from sage.arith.all import binomial, Euler_Phi
from sage.all import prod
from sage.matrix.all import matrix
class QuiverMutationTypeFactory(SageObject):
def __call__(self, *args):
"""
For a detailed description, see :meth:`QuiverMutationType`.
EXAMPLES::
sage: from sage.combinat.cluster_algebra_quiver.quiver_mutation_type import QuiverMutationTypeFactory
sage: QuiverMutationTypeFactory()
QuiverMutationType
"""
# get data as arguments or as list/tuple
if len( args ) == 1:
data = args[0]
else:
data = args
# data is a QuiverMutationType
if isinstance(data, QuiverMutationType_Irreducible):
return data
elif isinstance(data, QuiverMutationType_Reducible):
return data
# check that data is a tuple or list
if isinstance(data, tuple) and len( data ) > 0:
pass
elif isinstance(data, list) and len( data ) > 0:
data = tuple( data )
else:
_mutation_type_error( data )
# check for reducible types
if all( type( data_component ) in [list,tuple,QuiverMutationType_Irreducible] for data_component in data ):
if len( data ) == 1: return QuiverMutationType( data[0] )
else:
data = tuple( QuiverMutationType(comp) for comp in data )
return QuiverMutationType_Reducible( *data )
# check for irreducible types
if len(data) == 2: data = (data[0],data[1],None)
elif len(data) == 3: pass
else: _mutation_type_error(data)
if isinstance(data[2], list): data = (data[0],data[1],tuple(data[2]))
if isinstance(data[1], list): data = (data[0],tuple(data[1]),data[2])
# mutation type casting
if True:
if data == ('D',2,None):
return QuiverMutationType( ('A',1,None), ('A',1,None) )
elif data == ('D',3,None):
data = ('A',3,None)
elif data == ('C',2,None):
data = ('B',2,None)
elif data == ('E',9,None):
data = ('E',8,1)
elif data[0] == 'A' and data[2] == 1 and isinstance(data[1], tuple) and len(data[1]) == 2 and min(data[1]) == 0:
if max(data[1]) == 0:
pass
elif max(data[1]) == 1:
data = ('A', 1,None)
elif max(data[1]) == 2:
return QuiverMutationType( ('A',1,None), ('A',1,None) )
elif max(data[1]) == 3:
data = ('A',3,None)
else:
data = ('D',max(data[1]),None)
elif data[0] == 'GR' and data[2] is None and isinstance(data[1], tuple) and len(data[1]) == 2 and data[1][1] > data[1][0]:
if min(data[1]) > max(data[1])/2 and max(data[1]) != min(data[1])+1:
data = (data[0],(max(data[1])-min(data[1]),max(data[1])),data[2])
if min(data[1]) == 2 and max(data[1]) > 3:
data = ('A',max(data[1])-3,None)
elif data[1] == (3,6):
data = ('D',4,None)
elif data[1] == (3,7):
data = ('E',6,None)
elif data[1] == (3,8):
data = ('E',8,None)
elif data[1] == (3,9):
data = ('E',8,[1,1])
elif data[1] == (4,8):
data = ('E',7,[1,1])
elif data == ('TR',1,None):
data = ('A',1,None)
elif data == ('TR',2,None):
data = ('A',3,None)
elif data == ('TR',3,None):
data = ('D',6,None)
elif data == ('TR',4,None):
data = ('E',8,(1,1))
# mutation type casting from Kac conventions
elif data == ('A',1,1):
data = ('A',(1,1),1)
elif data[0] == 'B' and data[2] == 1:
if data[1] == 2:
data = ('CC',2,1)
elif data[1] > 2:
data = ('BD',data[1],1)
elif data[0] == 'B' and data[2] == -1:
if data[1] == 2:
data = ('BB',2,1)
elif data[1] > 2:
data= ('CD',data[1],1)
elif data[0] == 'C' and data[1] > 1 and data[2] == 1:
data = ('CC',data[1],1)
elif data[0] == 'C' and data[1] > 1 and data[2] == -1:
data = ('BB',data[1],1)
elif data == ('A',2,2):
data = ('BC',1,1)
elif data[0] == 'A' and data[1] in ZZ and data[1] > 1 and data[1]%2 == 0 and data[2] == 2:
data = ('BC',data[1]//2,1)
elif data[0] == 'A' and data[1] in ZZ and data[1] > 3 and data[1]%2 == 1 and data[2] == 2:
data = ('CD',(data[1]+1)//2,1)
# We think of ('A',3,2) as ('D',3,2)
elif data == ('A',3,2):
data = ('BB',2,1)
elif data[0] == 'D' and data[1] in ZZ and data[1] > 2 and data[2] == 2:
data = ('BB',data[1]-1,1)
elif data == ('E',6,2):
data = ('F',4,-1)
elif data == ('D',4,3):
data = ('G',2,-1)
elif data == ('F',4,(2,1)):
data = ('F',4,(1,2))
elif data == ('G',2,(3,1)):
data = ('G',2,(1,3))
elif data[0] == 'T' and data[2] is None:
data = (data[0],tuple(sorted(data[1])),data[2])
r,p,q = data[1]
if r == 1:
data = ('A',p+q-1,None)
elif r == p == 2:
data = ('D',q+2,None)
elif r == 2 and p == 3:
if q in (3,4,5): data = ('E',q+3,None)
elif q == 6: data = ('E',8,1)
else: data = ('E',q+3,None)
elif r== 2 and p == q == 4:
data = ('E',7,1)
elif r == p == q == 3:
data = ('E',6,1)
elif data[0] == 'R2' and data[2] is None and all(data[1][i] in ZZ and data[1][i] > 0 for i in [0,1]):
data = (data[0],tuple(sorted(data[1])),data[2])
b,c = data[1]
if data[1] == (1,1):
data = ('A',2,None)
elif data[1] == (1,2):
data = ('B',2,None)
elif data[1] == (1,3):
data = ('G',2,None)
elif data[1] == (1,4):
data = ('BC',1,1)
elif data[1] == (2,2):
data = ('A',(1,1),1)
# setting the parameters and returning the mutation type
letter,rank,twist = data
if not isinstance(letter, str):
_mutation_type_error(data)
if isinstance(rank, list):
rank = tuple(rank)
if isinstance(twist, list):
twist = tuple(twist)
return QuiverMutationType_Irreducible(letter,rank,twist)
def _repr_(self):
"""
Return the string representation of ``self``.
EXAMPLES::
sage: QuiverMutationType # indirect doctest
QuiverMutationType
"""
return "QuiverMutationType"
def samples(self, finite=None, affine=None, elliptic=None,
mutation_finite=None):
"""
Return a sample of the available quiver mutations types.
INPUT:
- ``finite``
- ``affine``
- ``elliptic``
- ``mutation_finite``
All four input keywords default values are ``None``. If
set to ``True`` or ``False``, only these samples are returned.
EXAMPLES::
sage: QuiverMutationType.samples()
[['A', 1], ['A', 5], ['B', 2], ['B', 5], ['C', 3],
['C', 5], [ ['A', 1], ['A', 1] ], ['D', 5], ['E', 6],
['E', 7], ['E', 8], ['F', 4], ['G', 2],
['A', [1, 1], 1], ['A', [4, 5], 1], ['D', 4, 1],
['BB', 5, 1], ['E', 6, [1, 1]], ['E', 7, [1, 1]],
['R2', [1, 5]], ['R2', [3, 5]], ['E', 10], ['BE', 5],
['GR', [3, 10]], ['T', [3, 3, 4]]]
sage: QuiverMutationType.samples(finite=True)
[['A', 1], ['A', 5], ['B', 2], ['B', 5], ['C', 3],
['C', 5], [ ['A', 1], ['A', 1] ], ['D', 5], ['E', 6],
['E', 7], ['E', 8], ['F', 4], ['G', 2]]
sage: QuiverMutationType.samples(affine=True)
[['A', [1, 1], 1], ['A', [4, 5], 1], ['D', 4, 1], ['BB', 5, 1]]
sage: QuiverMutationType.samples(elliptic=True)
[['E', 6, [1, 1]], ['E', 7, [1, 1]]]
sage: QuiverMutationType.samples(mutation_finite=False)
[['R2', [1, 5]], ['R2', [3, 5]], ['E', 10], ['BE', 5],
['GR', [3, 10]], ['T', [3, 3, 4]]]
"""
result = self._samples()
if finite is not None:
result = [t for t in result if t.is_finite() == finite]
if affine is not None:
result = [t for t in result if t.is_affine() == affine]
if elliptic is not None:
result = [t for t in result if t.is_elliptic() == elliptic]
if mutation_finite is not None:
result = [t for t in result
if t.is_mutation_finite() == mutation_finite]
return result
@cached_method
def _samples(self):
"""
Return a list of sample of available Cartan types.
EXAMPLES::
sage: X = QuiverMutationType._samples()
"""
finite_types = \
[QuiverMutationType(t) for t in [['A', 1], ['A', 5], ['B', 2], ['B', 5],
['C', 3], ['C', 5], ['D', 2], ['D', 5],
["E", 6], ["E", 7], ["E", 8], ["F", 4],
["G", 2]]]
affine_types = \
[QuiverMutationType(t) for t in [['A', [1,1], 1], ['A', [4,5], 1], ['D', 4, 1], ['BB', 5, 1]]]
elliptic_types = \
[QuiverMutationType(t) for t in [['E', 6, [1,1]], ['E', 7, [1,1]]]]
mutation_finite_types = \
[QuiverMutationType(t) for t in [['R2',(1,5)], ['R2',(3,5)]]]
mutation_infinite_types = \
[QuiverMutationType(t) for t in [['E',10], ['BE',5], ['GR',(3,10)], ['T',(3,3,4)]]]
return finite_types + affine_types + elliptic_types + mutation_finite_types + mutation_infinite_types
QuiverMutationType = QuiverMutationTypeFactory()
QuiverMutationType.__doc__ = \
r"""
*Quiver mutation types* can be seen as a slight generalization of
*generalized Cartan types*.
Background on generalized Cartan types can be found at
:wikipedia:`Generalized_Cartan_matrix`
For the compendium on the cluster algebra and quiver package in Sage see [MS2011]_
A `B`-matrix is a skew-symmetrizable `( n \times n )`-matrix `M`.
I.e., there exists an invertible diagonal matrix `D` such that `DM` is
skew-symmetric. `M` can be encoded as a *quiver* by having a directed
edge from vertex `i` to vertex `j` with label `(a,b)` if `a = M_{i,j}
> 0` and `b = M_{j,i} < 0`. We consider quivers up to *mutation
equivalence*.
To a quiver mutation type we can associate a *generalized Cartan type*
by sending `M` to the generalized Cartan matrix `C(M)` obtained by
replacing all positive entries by their negatives and adding `2`'s on
the main diagonal.
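As a small worked instance of this map (it can also be read off from the
``b_matrix`` and ``cartan_matrix`` examples further below): the matrix `M`
with rows `(0, 1, 0)`, `(-1, 0, -1)`, `(0, 2, 0)` is skew-symmetrizable
(take `D` to be the diagonal matrix with entries `2, 2, 1`); replacing its
positive entries by their negatives and adding `2`'s on the main diagonal
gives the generalized Cartan matrix with rows `(2, -1, 0)`, `(-1, 2, -1)`,
`(0, -2, 2)`.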
``QuiverMutationType`` constructs a quiver mutation type object. For
more detail on the possible different types, please see the
compendium.
INPUT:
The input consists either of a quiver mutation type, or of a
``letter`` (a string), a ``rank`` (one integer or a list/tuple of
integers), and an optional ``twist`` (an integer or a list of
integers). There are several different naming conventions for quiver
mutation types.
- Finite type -- ``letter`` is a Dynkin type (A-G), and ``rank`` is
the rank.
- Affine type -- there is more than one convention for naming affine
types.
* Kac's notation: ``letter`` is a Dynkin type, ``rank`` is the
rank of the associated finite Dynkin diagram, and ``twist`` is the
twist, which could be 1, 2, or 3. In the special case of affine
type A, there is more than one quiver mutation type associated to
the Cartan type. In this case only, ``rank`` is a pair of integers
(i,j), giving the number of edges pointing clockwise and the number
of edges pointing counter-clockwise. The total number of vertices
is given by i+j in this case.
* Naive notation: ``letter`` is one of 'BB', 'BC', 'BD', 'CC',
'CD'. The name specifies the two ends of the diagram, which are
joined by a path. The total number of vertices is given by
``rank +1`` (to match the indexing people expect because these
are affine types). In general, ``rank`` must be large enough
for the picture to make sense, but the special case where ``letter`` is
``BC`` and ``rank=1`` is also accepted.
* Macdonald notation: for the dual of an untwisted affine type
(such as ['C', 6,1]), we accept a twist of -1 (i.e.,
['C',6,-1]).
- Elliptic type -- ``letter`` is a Dynkin type, ``rank`` is the rank
of the finite Dynkin diagram, and ``twist`` is a tuple of two
integers. We follow Saito's notation.
- Other shapes:
* Rank 2: ``letter`` is 'R2', and ``rank`` is a pair of integers
specifying the label on the unique edge.
* Triangle: ``letter`` is ``TR``, and ``rank`` is the number of
vertices along a side.
* T: This defines a quiver shaped like a T. ``letter`` is 'T',
and the ``rank`` is a triple, whose entries specify the number
of vertices along each path from the branch point (counting the
branch point).
* Grassmannian: This defines the cluster algebra (without
coefficients) corresponding to the cluster algebra with
coefficients which is the co-ordinate ring of a Grassmannian.
``letter`` is 'GR'. ``rank`` is a pair of integers (`k`, `n`)
with 'k' < 'n' specifying the Grassmannian of `k`-planes in
`n`-space. This defines a quiver given by a (k-1) x (n-k-1)
grid where each square is cyclically oriented.
* Exceptional mutation finite quivers: The two exceptional
mutation finite quivers, found by Derksen-Owen, have ``letter``
as 'X' and ``rank`` 6 or 7, equal to the number of vertices.
* AE, BE, CE, DE: Quivers are built of one end which looks like
type (affine A), B, C, or D, and the other end which looks like
type E (i.e., it consists of two antennae, one of length one,
and one of length two). ``letter`` is 'AE', 'BE', 'CE', or
'DE', and ``rank`` is the total number of vertices. Note that
'AE' is of a slightly different form and requires ``rank`` to be
a pair of integers (i,j) just as in the case of affine type A.
See Exercise 4.3 in Kac's book Infinite Dimensional Lie Algebras
for more details.
* Infinite type E: It is also possible to obtain infinite-type E
quivers by specifying ``letter`` as 'E' and ``rank`` as the
number of vertices.
REFERENCES:
- A good reference for finite and affine Dynkin diagrams, including
Kac's notation, is the :wikipedia:`Dynkin_diagram`.
- A good reference for the skew-symmetrizable elliptic diagrams is
"Cluster algebras of finite mutation type via unfolding" by
A. Felikson, M. Shapiro, and P. Tumarkin, [FST2012]_.
EXAMPLES:
Finite types::
sage: QuiverMutationType('A',1)
['A', 1]
sage: QuiverMutationType('A',5)
['A', 5]
sage: QuiverMutationType('B',2)
['B', 2]
sage: QuiverMutationType('B',5)
['B', 5]
sage: QuiverMutationType('C',2)
['B', 2]
sage: QuiverMutationType('C',5)
['C', 5]
sage: QuiverMutationType('D',2)
[ ['A', 1], ['A', 1] ]
sage: QuiverMutationType('D',3)
['A', 3]
sage: QuiverMutationType('D',4)
['D', 4]
sage: QuiverMutationType('E',6)
['E', 6]
sage: QuiverMutationType('G',2)
['G', 2]
sage: QuiverMutationType('A',(1,0),1)
['A', 1]
sage: QuiverMutationType('A',(2,0),1)
[ ['A', 1], ['A', 1] ]
sage: QuiverMutationType('A',(7,0),1)
['D', 7]
Affine types::
sage: QuiverMutationType('A',(1,1),1)
['A', [1, 1], 1]
sage: QuiverMutationType('A',(2,4),1)
['A', [2, 4], 1]
sage: QuiverMutationType('BB',2,1)
['BB', 2, 1]
sage: QuiverMutationType('BB',4,1)
['BB', 4, 1]
sage: QuiverMutationType('CC',2,1)
['CC', 2, 1]
sage: QuiverMutationType('CC',4,1)
['CC', 4, 1]
sage: QuiverMutationType('BC',1,1)
['BC', 1, 1]
sage: QuiverMutationType('BC',5,1)
['BC', 5, 1]
sage: QuiverMutationType('BD',3,1)
['BD', 3, 1]
sage: QuiverMutationType('BD',5,1)
['BD', 5, 1]
sage: QuiverMutationType('CD',3,1)
['CD', 3, 1]
sage: QuiverMutationType('CD',5,1)
['CD', 5, 1]
sage: QuiverMutationType('D',4,1)
['D', 4, 1]
sage: QuiverMutationType('D',6,1)
['D', 6, 1]
sage: QuiverMutationType('E',6,1)
['E', 6, 1]
sage: QuiverMutationType('E',7,1)
['E', 7, 1]
sage: QuiverMutationType('E',8,1)
['E', 8, 1]
sage: QuiverMutationType('F',4,1)
['F', 4, 1]
sage: QuiverMutationType('F',4,-1)
['F', 4, -1]
sage: QuiverMutationType('G',2,1)
['G', 2, 1]
sage: QuiverMutationType('G',2,-1)
['G', 2, -1]
sage: QuiverMutationType('A',3,2) == QuiverMutationType('D',3,2)
True
Affine types using Kac's Notation::
sage: QuiverMutationType('A',1,1)
['A', [1, 1], 1]
sage: QuiverMutationType('B',5,1)
['BD', 5, 1]
sage: QuiverMutationType('C',5,1)
['CC', 5, 1]
sage: QuiverMutationType('A',2,2)
['BC', 1, 1]
sage: QuiverMutationType('A',7,2)
['CD', 4, 1]
sage: QuiverMutationType('A',8,2)
['BC', 4, 1]
sage: QuiverMutationType('D',6,2)
['BB', 5, 1]
sage: QuiverMutationType('E',6,2)
['F', 4, -1]
sage: QuiverMutationType('D',4,3)
['G', 2, -1]
Elliptic types::
sage: QuiverMutationType('E',6,[1,1])
['E', 6, [1, 1]]
sage: QuiverMutationType('F',4,[2,1])
['F', 4, [1, 2]]
sage: QuiverMutationType('G',2,[3,3])
['G', 2, [3, 3]]
Mutation finite types:
rank 2 cases::
sage: QuiverMutationType('R2',(1,1))
['A', 2]
sage: QuiverMutationType('R2',(1,2))
['B', 2]
sage: QuiverMutationType('R2',(1,3))
['G', 2]
sage: QuiverMutationType('R2',(1,4))
['BC', 1, 1]
sage: QuiverMutationType('R2',(1,5))
['R2', [1, 5]]
sage: QuiverMutationType('R2',(2,2))
['A', [1, 1], 1]
sage: QuiverMutationType('R2',(3,5))
['R2', [3, 5]]
Exceptional Derksen-Owen quivers::
sage: QuiverMutationType('X',6)
['X', 6]
(Mainly) mutation infinite types:
Infinite type E::
sage: QuiverMutationType('E',9)
['E', 8, 1]
sage: QuiverMutationType('E',10)
['E', 10]
sage: QuiverMutationType('E',12)
['E', 12]
sage: QuiverMutationType('AE',(2,3))
['AE', [2, 3]]
sage: QuiverMutationType('BE',5)
['BE', 5]
sage: QuiverMutationType('CE',5)
['CE', 5]
sage: QuiverMutationType('DE',6)
['DE', 6]
Grassmannian types::
sage: QuiverMutationType('GR',(2,4))
['A', 1]
sage: QuiverMutationType('GR',(2,6))
['A', 3]
sage: QuiverMutationType('GR',(3,6))
['D', 4]
sage: QuiverMutationType('GR',(3,7))
['E', 6]
sage: QuiverMutationType('GR',(3,8))
['E', 8]
sage: QuiverMutationType('GR',(3,10))
['GR', [3, 10]]
Triangular types::
sage: QuiverMutationType('TR',2)
['A', 3]
sage: QuiverMutationType('TR',3)
['D', 6]
sage: QuiverMutationType('TR',4)
['E', 8, [1, 1]]
sage: QuiverMutationType('TR',5)
['TR', 5]
T types::
sage: QuiverMutationType('T',(1,1,1))
['A', 1]
sage: QuiverMutationType('T',(1,1,4))
['A', 4]
sage: QuiverMutationType('T',(1,4,4))
['A', 7]
sage: QuiverMutationType('T',(2,2,2))
['D', 4]
sage: QuiverMutationType('T',(2,2,4))
['D', 6]
sage: QuiverMutationType('T',(2,3,3))
['E', 6]
sage: QuiverMutationType('T',(2,3,4))
['E', 7]
sage: QuiverMutationType('T',(2,3,5))
['E', 8]
sage: QuiverMutationType('T',(2,3,6))
['E', 8, 1]
sage: QuiverMutationType('T',(2,3,7))
['E', 10]
sage: QuiverMutationType('T',(3,3,3))
['E', 6, 1]
sage: QuiverMutationType('T',(3,3,4))
['T', [3, 3, 4]]
Reducible types::
sage: QuiverMutationType(['A',3],['B',4])
[ ['A', 3], ['B', 4] ]
"""
class QuiverMutationType_abstract(UniqueRepresentation, SageObject):
"""
EXAMPLES::
sage: mut_type1 = QuiverMutationType('A',5)
sage: mut_type2 = QuiverMutationType('A',5)
sage: mut_type3 = QuiverMutationType('A',6)
sage: mut_type1 == mut_type2
True
sage: mut_type1 == mut_type3
False
"""
def _repr_(self):
"""
Return the string representation of ``self``.
EXAMPLES::
sage: QuiverMutationType(['A',2]) # indirect doctest
['A', 2]
"""
return self._description
def plot(self, circular=False, directed=True):
"""
Return the plot of the underlying graph or digraph of ``self``.
INPUT:
- ``circular`` -- (default:``False``) if ``True``, the
circular plot is chosen, otherwise >>spring<< is used.
- ``directed`` -- (default: ``True``) if ``True``, the
directed version is shown, otherwise the undirected.
EXAMPLES::
sage: QMT = QuiverMutationType(['A',5])
sage: pl = QMT.plot()
sage: pl = QMT.plot(circular=True)
"""
return self.standard_quiver().plot(circular=circular, directed=directed)
def show(self, circular=False, directed=True):
"""
Show the plot of the underlying digraph of ``self``.
INPUT:
- ``circular`` -- (default:``False``) if ``True``, the
circular plot is chosen, otherwise >>spring<< is used.
- ``directed`` -- (default: ``True``) if ``True``, the
directed version is shown, otherwise the undirected.
TESTS::
sage: QMT = QuiverMutationType(['A',5])
sage: QMT.show() # long time
"""
self.plot( circular=circular, directed=directed ).show()
def letter(self):
"""
Return the classification letter of ``self``.
EXAMPLES::
sage: mut_type = QuiverMutationType( ['A',5] ); mut_type
['A', 5]
sage: mut_type.letter()
'A'
sage: mut_type = QuiverMutationType( ['BC',5,1] ); mut_type
['BC', 5, 1]
sage: mut_type.letter()
'BC'
sage: mut_type = QuiverMutationType(['A',3],['B',3]); mut_type
[ ['A', 3], ['B', 3] ]
sage: mut_type.letter()
'A x B'
sage: mut_type = QuiverMutationType(['A',3],['B',3],['X',6]); mut_type
[ ['A', 3], ['B', 3], ['X', 6] ]
sage: mut_type.letter()
'A x B x X'
"""
return self._letter
def rank(self):
"""
Return the rank in the standard quiver of ``self``.
The rank is the number of vertices.
EXAMPLES::
sage: mut_type = QuiverMutationType( ['A',5] ); mut_type
['A', 5]
sage: mut_type.rank()
5
sage: mut_type = QuiverMutationType( ['A',[4,5],1] ); mut_type
['A', [4, 5], 1]
sage: mut_type.rank()
9
sage: mut_type = QuiverMutationType( ['BC',5,1] ); mut_type
['BC', 5, 1]
sage: mut_type.rank()
6
sage: mut_type = QuiverMutationType(['A',3],['B',3]); mut_type
[ ['A', 3], ['B', 3] ]
sage: mut_type.rank()
6
sage: mut_type = QuiverMutationType(['A',3],['B',3],['X',6]); mut_type
[ ['A', 3], ['B', 3], ['X', 6] ]
sage: mut_type.rank()
12
"""
return self._rank
@cached_method
def b_matrix(self):
"""
Return the B-matrix of the standard quiver of ``self``.
The conventions for B-matrices agree with Fomin-Zelevinsky (up
to a reordering of the simple roots).
EXAMPLES::
sage: mut_type = QuiverMutationType( ['A',5] ); mut_type
['A', 5]
sage: mut_type.b_matrix()
[ 0 1 0 0 0]
[-1 0 -1 0 0]
[ 0 1 0 1 0]
[ 0 0 -1 0 -1]
[ 0 0 0 1 0]
sage: mut_type = QuiverMutationType(['A',3],['B',3]); mut_type
[ ['A', 3], ['B', 3] ]
sage: mut_type.b_matrix()
[ 0 1 0 0 0 0]
[-1 0 -1 0 0 0]
[ 0 1 0 0 0 0]
[ 0 0 0 0 1 0]
[ 0 0 0 -1 0 -1]
[ 0 0 0 0 2 0]
"""
return _edge_list_to_matrix(self._digraph.edges(), list(range(self._rank)), [])
@cached_method
def standard_quiver(self):
"""
Return the standard quiver of ``self``.
EXAMPLES::
sage: mut_type = QuiverMutationType( ['A',5] ); mut_type
['A', 5]
sage: mut_type.standard_quiver()
Quiver on 5 vertices of type ['A', 5]
sage: mut_type = QuiverMutationType( ['A',[5,3],1] ); mut_type
['A', [3, 5], 1]
sage: mut_type.standard_quiver()
Quiver on 8 vertices of type ['A', [3, 5], 1]
sage: mut_type = QuiverMutationType(['A',3],['B',3]); mut_type
[ ['A', 3], ['B', 3] ]
sage: mut_type.standard_quiver()
Quiver on 6 vertices of type [ ['A', 3], ['B', 3] ]
sage: mut_type = QuiverMutationType(['A',3],['B',3],['X',6]); mut_type
[ ['A', 3], ['B', 3], ['X', 6] ]
sage: mut_type.standard_quiver()
Quiver on 12 vertices of type [ ['A', 3], ['B', 3], ['X', 6] ]
"""
from .quiver import ClusterQuiver
Q = ClusterQuiver(self._digraph)
Q._mutation_type = self
return Q
@cached_method
def cartan_matrix(self):
"""
Return the Cartan matrix of ``self``.
Note that (up to a reordering of the simple roots) the convention for
the definition of Cartan matrix, used here and elsewhere in Sage,
agrees with the conventions of Kac, Fulton-Harris, and
Fomin-Zelevinsky, but disagrees with the convention of Bourbaki.
The `(i,j)` entry is `2(\\alpha_i,\\alpha_j)/(\\alpha_i,\\alpha_i)`.
EXAMPLES::
sage: mut_type = QuiverMutationType(['A',5]); mut_type
['A', 5]
sage: mut_type.cartan_matrix()
[ 2 -1 0 0 0]
[-1 2 -1 0 0]
[ 0 -1 2 -1 0]
[ 0 0 -1 2 -1]
[ 0 0 0 -1 2]
sage: mut_type = QuiverMutationType(['A',3],['B',3]); mut_type
[ ['A', 3], ['B', 3] ]
sage: mut_type.cartan_matrix()
[ 2 -1 0 0 0 0]
[-1 2 -1 0 0 0]
[ 0 -1 2 0 0 0]
[ 0 0 0 2 -1 0]
[ 0 0 0 -1 2 -1]
[ 0 0 0 0 -2 2]
"""
# as soon as CartanMatrix is implemented we should use it here:
# from sage.combinat.root_system.cartan_matrix import CartanMatrix
cmat = copy(self.b_matrix())
for i,j in cmat.nonzero_positions():
a = cmat[i,j]
if a > 0: cmat[i,j] = -a
for i in range(self._rank):
cmat[i,i] = 2
# return CartanMatrix(cmat)
return cmat
def is_irreducible(self):
"""
Return ``True`` if ``self`` is irreducible.
EXAMPLES::
sage: mt = QuiverMutationType(['A',2])
sage: mt.is_irreducible()
True
"""
return self._info['irreducible']
def is_mutation_finite(self):
"""
Return ``True`` if ``self`` is of finite mutation type.
This means that its mutation class has only finitely many
different B-matrices.
EXAMPLES::
sage: mt = QuiverMutationType(['D',5,1])
sage: mt.is_mutation_finite()
True
"""
return self._info['mutation_finite']
def is_simply_laced(self):
"""
Return ``True`` if ``self`` is simply laced.
This means that the only arrows that appear in the quiver of
``self`` are single unlabelled arrows.
EXAMPLES::
sage: mt = QuiverMutationType(['A',2])
sage: mt.is_simply_laced()
True
sage: mt = QuiverMutationType(['B',2])
sage: mt.is_simply_laced()
False
sage: mt = QuiverMutationType(['A',(1,1),1])
sage: mt.is_simply_laced()
False
"""
return self._info['simply_laced']
def is_skew_symmetric(self):
"""
Return ``True`` if the B-matrix of ``self`` is skew-symmetric.
EXAMPLES::
sage: mt = QuiverMutationType(['A',2])
sage: mt.is_skew_symmetric()
True
sage: mt = QuiverMutationType(['B',2])
sage: mt.is_skew_symmetric()
False
sage: mt = QuiverMutationType(['A',(1,1),1])
sage: mt.is_skew_symmetric()
True
"""
return self._info['skew_symmetric']
def is_finite(self):
"""
Return ``True`` if ``self`` is of finite type.
This means that the cluster algebra associated to ``self`` has
only a finite number of cluster variables.
EXAMPLES::
sage: mt = QuiverMutationType(['A',2])
sage: mt.is_finite()
True
sage: mt = QuiverMutationType(['A',[4,2],1])
sage: mt.is_finite()
False
"""
return self._info['finite']
def is_affine(self):
"""
Return ``True`` if ``self`` is of affine type.
EXAMPLES::
sage: mt = QuiverMutationType(['A',2])
sage: mt.is_affine()
False
sage: mt = QuiverMutationType(['A',[4,2],1])
sage: mt.is_affine()
True
"""
if self.is_irreducible():
return self._info['affine']
else:
return False
def is_elliptic(self):
"""
Return ``True`` if ``self`` is of elliptic type.
EXAMPLES::
sage: mt = QuiverMutationType(['A',2])
sage: mt.is_elliptic()
False
sage: mt = QuiverMutationType(['E',6,[1,1]])
sage: mt.is_elliptic()
True
"""
if self.is_irreducible():
return self._info['elliptic']
else:
return False
def properties(self):
"""
Print a scheme of all properties of ``self``.
Most properties have natural definitions for either irreducible or
reducible types. ``affine`` and ``elliptic`` are only defined for
irreducible types.
EXAMPLES::
sage: mut_type = QuiverMutationType(['A',3]); mut_type
['A', 3]
sage: mut_type.properties()
['A', 3] has rank 3 and the following properties:
- irreducible: True
- mutation finite: True
- simply-laced: True
- skew-symmetric: True
- finite: True
- affine: False
- elliptic: False
sage: mut_type = QuiverMutationType(['B',3]); mut_type
['B', 3]
sage: mut_type.properties()
['B', 3] has rank 3 and the following properties:
- irreducible: True
- mutation finite: True
- simply-laced: False
- skew-symmetric: False
- finite: True
- affine: False
- elliptic: False
sage: mut_type = QuiverMutationType(['B',3,1]); mut_type
['BD', 3, 1]
sage: mut_type.properties()
['BD', 3, 1] has rank 4 and the following properties:
- irreducible: True
- mutation finite: True
- simply-laced: False
- skew-symmetric: False
- finite: False
- affine: True
- elliptic: False
sage: mut_type = QuiverMutationType(['E',6,[1,1]]); mut_type
['E', 6, [1, 1]]
sage: mut_type.properties()
['E', 6, [1, 1]] has rank 8 and the following properties:
- irreducible: True
- mutation finite: True
- simply-laced: False
- skew-symmetric: True
- finite: False
- affine: False
- elliptic: True
sage: mut_type = QuiverMutationType(['A',3],['B',3]); mut_type
[ ['A', 3], ['B', 3] ]
sage: mut_type.properties()
[ ['A', 3], ['B', 3] ] has rank 6 and the following properties:
- irreducible: False
- mutation finite: True
- simply-laced: False
- skew-symmetric: False
- finite: True
sage: mut_type = QuiverMutationType('GR',[4,9]); mut_type
['GR', [4, 9]]
sage: mut_type.properties()
['GR', [4, 9]] has rank 12 and the following properties:
- irreducible: True
- mutation finite: False
- simply-laced: True
- skew-symmetric: True
- finite: False
- affine: False
- elliptic: False
"""
txt = '{} has rank {} and the following properties:'
print(txt.format(self, self.rank()))
s = "\t- {} {}"
print(s.format('irreducible: ', self.is_irreducible()))
print(s.format('mutation finite: ', self.is_mutation_finite()))
print(s.format('simply-laced: ', self.is_simply_laced()))
print(s.format('skew-symmetric: ', self.is_skew_symmetric()))
print(s.format('finite: ', self.is_finite()))
if self.is_irreducible():
print(s.format('affine: ', self.is_affine()))
print(s.format('elliptic: ', self.is_elliptic()))
class QuiverMutationType_Irreducible(QuiverMutationType_abstract):
"""
The mutation type for a cluster algebra or a quiver. Should not be
called directly, but through QuiverMutationType.
"""
def __init__(self, letter, rank, twist=None):
"""
Should not be called directly but through QuiverMutationType.
INPUT:
- ``letter`` -- the letter of the mutation type
- ``rank`` -- the rank of the mutation type
- ``twist`` -- the twist of the mutation type
EXAMPLES::
sage: QuiverMutationType('A',5)
['A', 5]
sage: QuiverMutationType('A',[4,5],1)
['A', [4, 5], 1]
sage: QuiverMutationType('BB',5,1)
['BB', 5, 1]
sage: QuiverMutationType('X',6)
['X', 6]
"""
# _rank and _bi_rank are initialized
self._rank = None
self._bi_rank = None
# _graph and _digraph are initialized
self._graph = Graph()
self._digraph = DiGraph()
# _info is initialized
self._info = {}
self._info['irreducible'] = True
self._info['mutation_finite'] = False
self._info['simply_laced'] = False
self._info['skew_symmetric'] = False
self._info['finite'] = False
self._info['affine'] = False
self._info['elliptic'] = False
self._info['irreducible_components'] = False
if isinstance(rank, tuple):
rank = list(rank)
if isinstance(twist, tuple):
twist = list(twist)
# _letter/twist is the input letter/twist
self._letter = letter
self._twist = twist
data = [letter,rank,twist]
# type A (finite and affine)
if letter == 'A':
if twist is None and rank in ZZ and rank > 0:
self._rank = rank
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['finite'] = True
elif twist==1 and isinstance(rank, list) and len(rank) == 2 and all( rank[i] in ZZ and rank[i] >= 0 for i in [0,1] ) and rank != [0,0]:
if isinstance(rank, tuple):
rank = list( rank )
data[1] = rank
rank = sorted(rank)
self._bi_rank = rank
self._rank = sum( self._bi_rank )
self._info['mutation_finite'] = True
if self._rank > 2: self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
if rank[0] > 0:
self._info['affine'] = True
elif rank[0] == 0:
self._info['finite'] = True
else:
_mutation_type_error( data )
            # types ['A',1] and ['A',[0,1],1] need to be treated
            # separately (as there is no edge)
if twist is None and self._rank == 1 or twist == 1 and self._rank == 1:
self._graph.add_vertex( 0 )
            # type ['A',[1,1],1] needs to be treated separately as well
            # (as there is a double edge)
elif twist == 1 and self._bi_rank[0] == 1 and self._bi_rank[1] == 1:
self._graph.add_edge( 0,1,2 )
else:
for i in range( self._rank - 1 ):
self._graph.add_edge( i, i+1, 1 )
if twist == 1:
self._digraph.add_edge( self._rank - 1, 0, 1 )
for i in range( self._rank - 1 ):
if i < ( 2 * self._bi_rank[0] ) and i%2 == 0:
self._digraph.add_edge( i+1, i, 1 )
else:
self._digraph.add_edge( i, i+1, 1 )
# type B (finite)
elif letter == 'B':
if twist is None and rank in ZZ and rank > 1:
self._rank = rank
self._info['mutation_finite'] = True
self._info['finite'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (1,-2) )
else:
self._graph.add_edge( rank-2, rank-1, (2,-1) )
# type C (finite)
elif letter == 'C':
if twist is None and rank in ZZ and rank > 1:
self._rank = rank
self._info['mutation_finite'] = True
self._info['finite'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (2,-1) )
else:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
# type BB (affine)
elif letter == 'BB':
if twist == 1 and rank in ZZ and rank > 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if rank % 2 == 0:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
else:
self._graph.add_edge( rank-2, rank-1, (2,-1) )
self._graph.add_edge( rank, 0 , (1,-2) )
# type CC (affine)
elif letter == 'CC':
if twist == 1 and rank in ZZ and rank > 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if rank % 2 == 0:
self._graph.add_edge( rank-2, rank-1, (2,-1) )
else:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
self._graph.add_edge( rank, 0 , (2,-1) )
# type BC (affine)
elif letter == 'BC':
if twist == 1 and rank in ZZ and rank >= 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
if rank == 1:
self._graph.add_edge( 0,1,(1,-4) )
else:
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (2,-1) )
else:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
if twist == 1:
self._graph.add_edge( rank, 0 , (1,-2) )
# type BD (affine)
elif letter == 'BD':
if twist == 1 and rank in ZZ and rank > 2:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (1,-2) )
else:
self._graph.add_edge( rank-2, rank-1, (2,-1) )
if twist == 1:
self._graph.add_edge( rank, 1 , 1 )
# type CD (affine)
elif letter == 'CD':
if twist == 1 and rank in ZZ and rank > 2:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (2,-1) )
else:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
if twist == 1:
self._graph.add_edge( rank, 1 , 1 )
# type D (finite and affine)
elif letter == 'D':
if rank in ZZ and rank > 3 and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['finite'] = True
elif twist == 1 and rank in ZZ and rank > 3:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
self._graph.add_edge( rank-3, rank-1, 1 )
if twist is not None:
self._graph.add_edge( rank, 1 ,1 )
# type E (finite, affine and elliptic)
elif letter == 'E':
if rank in [6,7,8] and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['finite'] = True
if rank == 6:
self._graph.add_edges( [ (0,1),(1,2),(2,3),(3,4),(2,5) ] )
elif rank == 7:
self._graph.add_edges([(0, 1), (1, 2), (2, 3),
(3, 4), (4, 5), (2, 6)])
elif rank == 8:
self._graph.add_edges([(0, 1), (1, 2), (2, 3),
(3, 4), (4, 5), (5, 6),(2, 7)])
elif rank in [6,7,8] and twist == 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['affine'] = True
if rank == 6:
self._graph.add_edges( [ (0,1),(1,2),(2,3),(3,4),(2,5),(5,6) ] )
elif rank == 7:
self._graph.add_edges( [ (0,1),(1,2),(2,3),(3,4),(4,5),(5,6),(3,7) ] )
elif rank == 8:
self._graph.add_edges( [ (0,1),(1,2),(2,3),(3,4),(4,5),(5,6),(6,7),(2,8) ] )
elif rank in [6,7,8] and twist == [1,1]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['skew_symmetric'] = True
self._info['elliptic'] = True
if rank == 6:
self._digraph.add_edges( [ (0,1,1),(1,2,1),(3,2,1),(3,4,1),(5,6,1),(6,7,1),(5,1,1),(2,5,2),(5,3,1),(6,2,1) ] )
elif rank == 7:
self._digraph.add_edges( [ (1,0,1),(1,2,1),(2,3,1),(4,3,1),(4,5,1),(6,5,1),(7,8,1),(3,7,2),(7,2,1),(7,4,1),(8,3,1) ] )
elif rank == 8:
self._digraph.add_edges( [ (0,1,1),(1,9,1),(3,9,1),(3,4,1),(2,8,1),(2,1,1),(9,2,2),(2,3,1),(8,9,1),(5,4,1),(5,6,1),(7,6,1) ] )
# type E (mutation infinite)
elif rank > 9 and twist is None:
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._rank = rank
for i in range(rank-2):
self._graph.add_edge( i, i+1, 1 )
self._graph.add_edge( 2, rank-1 )
else:
_mutation_type_error(data)
# type AE (mutation infinite)
elif letter == 'AE':
if isinstance(rank, list) and len(rank) == 2 and all( rank[i] in ZZ and rank[i] > 0 for i in [0,1] ) and twist is None:
if isinstance(rank, tuple):
rank = list( rank )
data[1] = rank
rank = sorted(rank)
self._bi_rank = rank
self._rank = sum( self._bi_rank ) + 1
if self._rank > 3: self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
if self._bi_rank == [1,1]:
self._graph.add_edges( [(0,1,2),(1,2,None)] )
else:
self._digraph.add_edge( self._rank - 2, 0 )
for i in range(self._rank-2):
if i < ( 2 * self._bi_rank[0] ) and i%2 == 0:
self._digraph.add_edge(i+1,i)
else:
self._digraph.add_edge(i,i+1)
self._digraph.add_edge(self._rank-2,self._rank-1)
else:
_mutation_type_error( data )
# type BE (mutation infinite)
elif letter == 'BE':
if rank >4 and twist is None:
self._rank = rank
for i in range(rank-3):
self._graph.add_edge( i, i+1 )
self._graph.add_edge( 2, rank-1 )
if rank%2 == 0:
self._graph.add_edge( rank-3,rank-2,(2,-1) )
else:
self._graph.add_edge( rank-3,rank-2,(1,-2) )
else:
_mutation_type_error( data )
# type CE (mutation infinite)
elif letter == 'CE':
if rank >4 and twist is None:
self._rank = rank
for i in range(rank-3):
self._graph.add_edge( i, i+1 )
self._graph.add_edge( 2, rank-1 )
if rank%2 == 0:
self._graph.add_edge( rank-3,rank-2,(1,-2) )
else:
self._graph.add_edge( rank-3,rank-2,(2,-1) )
else:
_mutation_type_error( data )
# type DE (mutation infinite)
elif letter == 'DE':
if rank >5 and twist is None:
self._rank = rank
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
for i in range(rank-3):
self._graph.add_edge( i, i+1 )
self._graph.add_edge( 2, rank-2 )
self._graph.add_edge( rank-4, rank-1 )
else:
_mutation_type_error( data )
# type F (finite, affine, and elliptic)
elif letter == 'F':
if rank == 4 and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['finite'] = True
self._graph.add_edges( [ (0,1,None),(1,2,(2,-1)),(2,3,None) ] )
elif rank == 4 and twist == 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
self._graph.add_edges( [ (0,1,None), (1,2,None),
(2,3,(1,-2)),(3,4,None) ] )
elif rank == 4 and twist == -1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
self._graph.add_edges( [ (0,1,None), (1,2,None),
(2,3,(2,-1)),(3,4,None) ] )
elif rank == 4 and (twist == [1,2]):
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,None),
(2,3,(2,-1)), (4,2,(1,-2)),
(3,4,2), (4,5,None), (5,3,None) ])
elif rank == 4 and (twist == [2,1]):
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,None),
(2,3,(1,-2)), (4,2,(2,-1)),
(3,4,2), (4,5,None), (5,3,None) ])
elif rank == 4 and twist == [2,2]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,None),
(3,1,None), (2,3,2),
(4,2,(2,-1)), (3,4,(1,-2)),
(5,4,None) ] )
elif rank == 4 and twist == [1,1]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,None),
(3,1,None), (2,3,2), (4,2,(1,-2)),
(3,4,(2,-1)), (5,4,None) ] )
else:
_mutation_type_error( data )
# type G (finite, affine, and elliptic)
elif letter == 'G':
if rank == 2 and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['finite'] = True
self._graph.add_edges( [ (0,1,(1,-3)) ] )
elif rank == 2 and twist == -1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
self._graph.add_edges( [ (0,1,None),(1,2,(1,-3)) ] )
elif rank == 2 and twist == 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
self._graph.add_edges( [ (0,1,None),(1,2,(3,-1)) ] )
elif rank == 2 and (twist == [1,3]):
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,(3,-1)),
(3,1,(1,-3)), (2,3,2)] )
elif rank == 2 and (twist == [3,1]):
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,(1,-3)),
(3,1,(3,-1)), (2,3,2)] )
elif rank == 2 and twist == [3,3]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (1,0,None), (0,2,2), (3,0,(3,-1)),
(2,1,None), (2,3, (1,-3))])
elif rank == 2 and twist == [1,1]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (1,0,None), (0,2,2), (3,0,(1,-3)),
(2,1,None), (2,3,(3,-1)) ] )
else:
_mutation_type_error( data )
# type GR (mutation infinite)
elif letter == 'GR':
if twist is None and isinstance(rank, list) and len(rank) == 2 and all( rank[i] in ZZ and rank[i] > 0 for i in [0,1] ) and rank[1] - 1 > rank[0] > 1:
gr_rank = (rank[0]-1,rank[1]-rank[0]-1)
self._rank = prod(gr_rank)
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
a,b = gr_rank
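                # build an a x b grid quiver in which vertex (i,j) is the
                # integer i*b+j; the arrow directions alternate with the
                # parity of i+j, so every unit square of the grid becomes a
                # cyclically oriented 4-cycle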
for i in range(a):
for j in range(b):
if i < a-1:
if (i+j) % 2 == 0:
self._digraph.add_edge(i*b+j,(i+1)*b+j)
else:
self._digraph.add_edge((i+1)*b+j,i*b+j)
if j < b-1:
if (i+j) % 2 == 0:
self._digraph.add_edge(i*b+j+1,i*b+j)
else:
self._digraph.add_edge(i*b+j,i*b+j+1)
else:
_mutation_type_error( data )
# type R2 (rank 2 finite mutation types)
elif letter == 'R2':
if twist is None and isinstance(rank, list) and len(rank) == 2 and all( rank[i] in ZZ and rank[i] > 0 for i in [0,1] ):
rank = sorted(rank)
b,c = rank
self._rank = 2
if b == c: self._info['skew_symmetric'] = True
self._graph.add_edge(0,1,(b,-c))
else:
_mutation_type_error( data )
# type T
elif letter == 'T':
if twist is None and isinstance(rank, list) and len(rank) == 3 and all( rank[i] in ZZ and rank[i] > 0 for i in [0,1,2] ):
if isinstance(rank, tuple):
rank = list( rank )
data[1] = rank
rank = sorted( rank )
self._rank = sum( rank ) - 2
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
r,p,q = rank
for i in range(q-1):
if i == 0:
self._graph.add_edge(0,1)
self._graph.add_edge(0,r)
self._graph.add_edge(0,r+p-1)
else:
if i < r-1:
self._graph.add_edge(i,i+1)
if i < p-1:
self._graph.add_edge(i+r-1,i+r)
self._graph.add_edge(i+r+p-2,i+r+p-1)
else:
_mutation_type_error( data )
# type TR (mutation infinite if rank > 2)
elif letter == 'TR':
            # type ['TR',1] needs to be treated separately (as there is no edge)
if twist is None and rank == 1:
self._graph.add_vertex( 0 )
elif twist is None and rank > 1:
self._rank = rank*(rank+1)//2
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
level = 0
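                # the vertices form a triangular grid with rows of length
                # rank, rank-1, ..., 1; row `level` starts at index
                # rank*level - (0+1+...+(level-1)), and each elementary
                # triangle of the grid is oriented cyclically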
while level < rank:
nr = rank*level-sum(range(level))
for i in range(nr,nr+rank-level-1):
self._digraph.add_edge(i,i+1)
self._digraph.add_edge(i+rank-level,i)
self._digraph.add_edge(i+1,i+rank-level)
level += 1
else:
_mutation_type_error( data )
# type X
elif letter == 'X':
if rank in [6,7] and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['skew_symmetric'] = True
self._digraph.add_edges( [ (0,1,2),(1,2,None),(2,0,None),
(2,3,None),(3,4,2),(4,2,None),
(2,5,None) ] )
if rank == 7:
self._digraph.add_edges( [ (5,6,2),(6,2,None) ] )
else:
_mutation_type_error( data )
# otherwise, an error is raised
else:
_mutation_type_error( data )
# in the bipartite case, the digraph is constructed from the graph
if not self._digraph:
if self._graph.is_bipartite():
self._digraph = _bipartite_graph_to_digraph( self._graph )
else:
raise ValueError('The QuiverMutationType does not have '
'a Coxeter diagram.')
# in the other cases, the graph is constructed from the digraph
if not self._graph:
self._graph = self._digraph.to_undirected()
# _description is as for CartanType
if twist: self._description = str( [letter,rank,twist] )
else: self._description = str( [letter,rank] )
def irreducible_components( self ):
"""
Return a list of all irreducible components of ``self``.
EXAMPLES::
sage: mut_type = QuiverMutationType('A',3); mut_type
['A', 3]
sage: mut_type.irreducible_components()
(['A', 3],)
"""
return tuple([self])
@cached_method
def class_size(self):
r"""
If it is known, the size of the mutation class of all quivers
which are mutation equivalent to the standard quiver of
``self`` (up to isomorphism) is returned.
Otherwise, ``NotImplemented`` is returned.
Formula for finite type A is taken from Torkildsen - Counting
cluster-tilted algebras of type `A_n`.
Formulas for affine type A and finite type D are taken from Bastian,
Prellberg, Rubey, Stump - Counting the number of elements in the
mutation classes of `\widetilde A_n` quivers.
Formulas for finite and affine types B and C are
proven but not yet published.
Conjectural formulas for several other non-simply-laced affine types
are implemented.
Exceptional Types (finite, affine, and elliptic) E, F, G, and X are
hardcoded.
EXAMPLES::
sage: mut_type = QuiverMutationType( ['A',5] ); mut_type
['A', 5]
sage: mut_type.class_size()
19
sage: mut_type = QuiverMutationType( ['A',[10,3],1] ); mut_type
['A', [3, 10], 1]
sage: mut_type.class_size()
142120
sage: mut_type = QuiverMutationType( ['B',6] ); mut_type
['B', 6]
sage: mut_type.class_size()
132
sage: mut_type = QuiverMutationType( ['BD',6,1] ); mut_type
['BD', 6, 1]
sage: mut_type.class_size()
Warning: This method uses a formula which has not been proved correct.
504
Check that :trac:`14048` is fixed::
sage: mut_type = QuiverMutationType( ['F',4,(2,1)] )
sage: mut_type.class_size()
90
"""
if not self.is_mutation_finite():
return infinity
# type A (finite and affine)
if self._letter == 'A':
# the formula is taken from Torkildsen - Counting
# cluster-tilted algebras of type A
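            # (for example, for n = 5 this gives a = binomial(12,6)//7 = 132,
            # plus binomial(6,3) = 20 since n is odd, and then 152//8 = 19)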
if self.is_finite():
n = self._rank
a = binomial( 2*(n+1), n+1 ) // (n+2)
if n % 2 == 1:
a += binomial( n+1, (n+1)//2 )
if n % 3 == 0:
a += 2 * binomial( 2*n//3, n//3 )
return a // (n+3)
# the formula is taken from Bastian, Prellberg, Rubey, Stump
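            # (for example, for bi_rank [3,10] the only common divisor is
            # k = 1, giving binomial(6,3)*binomial(20,10)//(2*13) = 142120)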
elif self.is_affine():
i,j = self._bi_rank
i = ZZ(i)
j = ZZ(j)
n = i+j
f = Euler_Phi()
if i == j:
return ( binomial( 2*i,i ) +
sum( f(k) * binomial(2*i//k,i//k)**2
for k in [k for k in i.divisors()
if k in j.divisors()] ) // n ) // 4
else:
return sum( f(k) * binomial(2*i//k,i//k) *
binomial(2*j//k,j//k)
for k in [k for k in i.divisors()
if k in j.divisors()] ) // ( 2 * n )
# types B and C (finite and affine)
elif self._letter in ['B', 'C']:
            # this formula is proven but not yet published; its correctness
            # is clear enough that no warning is printed
if self.is_finite():
n = self._rank
return binomial(2 * n, n) // (n + 1)
elif self._letter in ['BB','CC']:
# these two formulas are not yet proven
print(Warning("Warning: This method uses a formula "
"which has not been proved correct."))
if self.is_affine():
if self._twist == 1:
n = self._rank - 1
if n%2==1:
return binomial( 2*n-1, n-1 )
else:
return binomial( 2*n-1, n-1 ) + binomial( n-1, n//2 -1 )
# type BC (affine)
elif self._letter == 'BC':
# this formula is not yet proven
print(Warning("Warning: This method uses a formula "
"which has not been proved correct."))
if self.is_affine():
if self._twist == 1:
n = self._rank - 1
return binomial( 2*n, n )
# types BD and CD (affine)
elif self._letter in ['BD','CD']:
# this formula is not yet proven
print(Warning("Warning: This method uses a formula "
"which has not been proved correct."))
if self.is_affine():
if self._twist == 1:
n = self._rank - 2
return 2*binomial( 2*n, n )
# type D (finite and affine)
elif self._letter == 'D':
# the formula is taken from Bastian, Prellberg, Rubey, Stump
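            # (for example, for D_5 this gives
            # (phi(5)*binomial(2,1) + phi(1)*binomial(10,5)) // 10 = 26)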
if self.is_finite():
if self._rank == 4:
return 6
else:
f = Euler_Phi()
n = ZZ(self._rank)
return sum( f( n//k ) * binomial( 2*k, k )
for k in n.divisors() ) // (2*n)
# this formula is not yet proven
elif self.is_affine():
n = self._rank - 3
if n == 2:
return 9
else:
print(Warning ("Warning: This method uses a formula "
"which has not been proved correct."))
if n%2==1:
return 2*binomial(2*n,n)
else:
return 2*binomial(2*n,n) + binomial(n, n//2)
# the exceptional types are hard-coded
# type E (finite, affine and elliptic)
elif self._letter == 'E':
if self.is_finite():
if self._rank == 6:
return 67
elif self._rank == 7:
return 416
elif self._rank == 8:
return 1574
elif self.is_affine():
if self._rank == 7:
return 132
elif self._rank == 8:
return 1080
elif self._rank == 9:
return 7560
elif self.is_elliptic():
if self._rank == 8:
return 49
elif self._rank == 9:
return 506
elif self._rank == 10:
return 5739
# type F
elif self._letter == 'F':
if self.is_finite():
return 15
elif self.is_affine():
return 60
elif self.is_elliptic():
if self._twist == [1,2]:
return 90
if self._twist == [1,1] or self._twist == [2,2]:
return 35
# type G
elif self._letter == 'G':
if self.is_finite():
return 2
elif self.is_affine():
return 6
elif self.is_elliptic():
if self._twist == [1,3]:
return 7
if self._twist == [1,1] or self._twist == [3,3]:
return 2
# type X
elif self._letter == 'X':
if self._rank == 6:
return 5
elif self._rank == 7:
return 2
# otherwise the size is returned to be unknown
else:
print("Size unknown")
return NotImplemented
def dual(self):
"""
Return the QuiverMutationType which is dual to ``self``.
EXAMPLES::
sage: mut_type = QuiverMutationType('A',5); mut_type
['A', 5]
sage: mut_type.dual()
['A', 5]
sage: mut_type = QuiverMutationType('B',5); mut_type
['B', 5]
sage: mut_type.dual()
['C', 5]
sage: mut_type.dual().dual()
['B', 5]
sage: mut_type.dual().dual() == mut_type
True
"""
letter = self.letter()
        # types B and C (other than BC) are dual to each other
if letter != 'BC' and letter[0] in ['B','C']:
if letter == 'BB': letter = 'CC'
elif letter == 'CC': letter = 'BB'
elif letter[0] == 'B': letter = 'C' + letter[1:]
elif letter[0] == 'C': letter = 'B' + letter[1:]
rank = self._rank
if self.is_affine():
rank -= 1
twist = self._twist
return QuiverMutationType(letter,rank,twist)
# the cases F and G have non-trivial duality in some cases
elif letter in ['F','G']:
if self.is_finite(): return self
elif self.is_affine():
rank = self._rank - 1
twist = - self._twist
elif self.is_elliptic():
twist = self._twist
rank = self._rank - 2
if letter == 'F':
                    if self._twist == [2,2]:
                        twist = [1,1]
                    elif self._twist == [1,1]:
                        twist = [2,2]
if letter == 'G':
if self._twist == [3,3]:
twist = [1,1]
elif self._twist == [1,1]:
twist = [3,3]
else: rank = self._rank
return QuiverMutationType(letter,rank,twist)
else:
return self
class QuiverMutationType_Reducible(QuiverMutationType_abstract):
"""
The mutation type for a cluster algebra or a quiver. Should not be
called directly, but through QuiverMutationType. Inherits from
QuiverMutationType_abstract.
"""
def __init__(self, *args):
"""
Should not be called directly, but through QuiverMutationType.
INPUT:
- ``data`` -- a list each of whose entries is a
QuiverMutationType_Irreducible
EXAMPLES::
sage: QuiverMutationType(['A',4],['B',6])
[ ['A', 4], ['B', 6] ]
"""
data = args
if len(data) < 2 or not all( isinstance(comp, QuiverMutationType_Irreducible) for comp in data ):
return _mutation_type_error(data)
# _info is initialized
self._info = {}
self._info['irreducible'] = False
self._info['mutation_finite'] = all(comp.is_mutation_finite()
for comp in data)
self._info['simply_laced'] = all(comp.is_simply_laced()
for comp in data)
self._info['skew_symmetric'] = all(comp.is_skew_symmetric()
for comp in data)
self._info['finite'] = all(comp.is_finite() for comp in data)
self._info['irreducible_components'] = copy(data)
# letter and rank are initialized
self._letter = ''
self._rank = 0
# graph and digraph are initialized
self._graph = Graph()
self._digraph = DiGraph()
for comp in data:
if self._letter:
self._letter += ' x '
self._letter += comp._letter
self._rank += comp._rank
self._graph = self._graph.disjoint_union(comp._graph,
labels='integers')
self._digraph = self._digraph.disjoint_union(comp._digraph,
labels='integers')
self._graph.name('')
self._digraph.name('')
# _description is as for CartanType
self._description = "[ "
comps = self.irreducible_components()
for i in range(len(comps)):
if i > 0: self._description += ", "
self._description += comps[i]._description
self._description += " ]"
def irreducible_components( self ):
"""
Return a list of all irreducible components of ``self``.
EXAMPLES::
sage: mut_type = QuiverMutationType('A',3); mut_type
['A', 3]
sage: mut_type.irreducible_components()
(['A', 3],)
sage: mut_type = QuiverMutationType(['A',3],['B',3]); mut_type
[ ['A', 3], ['B', 3] ]
sage: mut_type.irreducible_components()
(['A', 3], ['B', 3])
sage: mut_type = QuiverMutationType(['A',3],['B',3],['X',6])
sage: mut_type
[ ['A', 3], ['B', 3], ['X', 6] ]
sage: mut_type.irreducible_components()
(['A', 3], ['B', 3], ['X', 6])
"""
return self._info['irreducible_components']
@cached_method
def class_size(self):
"""
If it is known, the size of the mutation class of all quivers
which are mutation equivalent to the standard quiver of
``self`` (up to isomorphism) is returned.
Otherwise, ``NotImplemented`` is returned.
EXAMPLES::
sage: mut_type = QuiverMutationType(['A',3],['B',3]); mut_type
[ ['A', 3], ['B', 3] ]
sage: mut_type.class_size()
20
sage: mut_type = QuiverMutationType(['A',3],['B',3],['X',6])
sage: mut_type
[ ['A', 3], ['B', 3], ['X', 6] ]
sage: mut_type.class_size()
100
"""
if not self.is_mutation_finite():
return infinity
else:
components = []
multiplicities = []
for x in self.irreducible_components():
if components.count(x) == 0:
components.append(x)
multiplicities.append(1)
else:
y = components.index(x)
multiplicities[y] = multiplicities[y]+1
sizes = [ x.class_size() for x in components ]
if NotImplemented in sizes:
print("Size unknown")
return NotImplemented
else:
return prod( [binomial(sizes[i]+multiplicities[i]-1,
multiplicities[i] ) for i in range (0,len(sizes))])
def dual(self):
"""
Return the QuiverMutationType which is dual to ``self``.
EXAMPLES::
sage: mut_type = QuiverMutationType(['A',5],['B',6],['C',5],['D',4]); mut_type
[ ['A', 5], ['B', 6], ['C', 5], ['D', 4] ]
sage: mut_type.dual()
[ ['A', 5], ['C', 6], ['B', 5], ['D', 4] ]
"""
comps = self.irreducible_components()
return QuiverMutationType( [comp.dual() for comp in comps ] )
def _construct_classical_mutation_classes(n):
r"""
Return a dict with keys being tuples representing regular
QuiverMutationTypes of the given rank, and with values being lists
or sets containing all mutation equivalent quivers as dig6 data.
EXAMPLES::
sage: from sage.combinat.cluster_algebra_quiver.quiver_mutation_type import _construct_classical_mutation_classes
sage: rank_2_classes = _construct_classical_mutation_classes(2) # long time
sage: for mut_class in sorted(rank_2_classes.keys(),key=str): # long time
....: print("{} {}".format(mut_class, rank_2_classes[mut_class]))
('A', (1, 1), 1) [('AO', (((0, 1), (2, -2)),))]
('A', 2) [('AO', ())]
('B', 2) [('AO', (((0, 1), (1, -2)),)), ('AO', (((0, 1), (2, -1)),))]
('BC', 1, 1) [('AO', (((0, 1), (1, -4)),)),
('AO', (((0, 1), (4, -1)),))]
"""
from sage.combinat.cluster_algebra_quiver.quiver import ClusterQuiver
data = {}
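    # each value below is the full mutation class of the standard quiver,
    # encoded as a list of pairs (dig6 string, tuple of non-(1,-1) edge
    # labels) as produced by ClusterQuiver(...).mutation_class(data_type='dig6')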
# finite A
data[ ('A',n) ] = ClusterQuiver(['A',n]).mutation_class(data_type='dig6')
# affine A
for j in range(1, n//2+1):
data[ ('A',(n-j,j),1) ] = ClusterQuiver(['A',[n-j,j],1]).mutation_class(data_type='dig6')
# finite B
if n > 1:
data[ ('B',n) ] = ClusterQuiver(['B',n]).mutation_class(data_type='dig6')
# affine B
if n > 2:
data[ ('BB',n-1,1) ] = ClusterQuiver(['BB',n-1,1]).mutation_class(data_type='dig6')
# finite C
if n > 2:
data[ ('C',n) ] = ClusterQuiver(['C',n]).mutation_class(data_type='dig6')
# affine C
if n > 1:
data[ ('BC',n-1,1) ] = ClusterQuiver(['BC',n-1,1]).mutation_class(data_type='dig6')
# affine CC
if n > 2:
data[ ('CC',n-1,1) ] = ClusterQuiver(['CC',n-1,1]).mutation_class(data_type='dig6')
# affine BD
if n > 3:
data[ ('BD',n-1,1) ] = ClusterQuiver(['BD',n-1,1]).mutation_class(data_type='dig6')
# affine CD
if n > 3:
data[ ('CD',n-1,1) ] = ClusterQuiver(['CD',n-1,1]).mutation_class(data_type='dig6')
# finite D
if n > 3:
data[ ('D',n) ] = ClusterQuiver(['D',n]).mutation_class(data_type='dig6')
# affine D
if n > 4:
data[ ('D',n-1,1) ] = ClusterQuiver(['D',n-1,1]).mutation_class(data_type='dig6')
return data
def _construct_exceptional_mutation_classes(n):
r"""
Return a dict with keys being tuples representing exceptional
QuiverMutationTypes of the given rank, and with values being lists
or sets containing all mutation equivalent quivers as dig6 data.
EXAMPLES::
sage: from sage.combinat.cluster_algebra_quiver.quiver_mutation_type import _construct_exceptional_mutation_classes
sage: rank_3_exceptional = _construct_exceptional_mutation_classes(3) # long time
sage: for mut_class in sorted(rank_3_exceptional.keys(), key=str): # long time
....: print("{} {}".format(mut_class, rank_3_exceptional[mut_class]))
('G', 2, -1) [('BH?', (((1, 2), (1, -3)),)),
('BGO', (((2, 1), (3, -1)),)), ('BW?', (((0, 1), (3, -1)),)),
('BP?', (((0, 1), (1, -3)),)),
('BP_', (((0, 1), (1, -3)), ((2, 0), (3, -1)))),
('BP_', (((0, 1), (3, -1)), ((1, 2), (1, -3)), ((2, 0), (2, -2))))]
('G', 2, 1) [('BH?', (((1, 2), (3, -1)),)),
('BGO', (((2, 1), (1, -3)),)), ('BW?', (((0, 1), (1, -3)),)),
('BP?', (((0, 1), (3, -1)),)),
('BKO', (((1, 0), (3, -1)), ((2, 1), (1, -3)))),
('BP_', (((0, 1), (2, -2)), ((1, 2), (1, -3)), ((2, 0), (3, -1))))]
"""
from sage.combinat.cluster_algebra_quiver.quiver import ClusterQuiver
data = {}
# finite E
if n in [6,7,8]:
data[ ('E',n) ] = ClusterQuiver(['E',n]).mutation_class(data_type='dig6')
# affine E
if n in [7,8,9]:
data[ ('E',n-1,1) ] = ClusterQuiver(['E',n-1,1]).mutation_class(data_type='dig6')
# elliptic E
if n in [8,9,10]:
data[ ('E',n-2,(1,1)) ] = ClusterQuiver(['E',n-2,[1,1]]).mutation_class(data_type='dig6')
# finite F
if n == 4:
data[ ('F',4) ] = ClusterQuiver(['F',4]).mutation_class(data_type='dig6')
# affine F
if n == 5:
data[ ('F',4,1) ] = ClusterQuiver(['F',4,1]).mutation_class(data_type='dig6')
data[ ('F',4,-1) ] = ClusterQuiver(['F',4,-1]).mutation_class(data_type='dig6')
# finite G
if n == 2:
data[ ('G',2) ] = ClusterQuiver(['G',2]).mutation_class(data_type='dig6')
# affine G
if n == 3:
data[ ('G',2,1) ] = ClusterQuiver(['G',2,1]).mutation_class(data_type='dig6')
data[ ('G',2,-1) ] = ClusterQuiver(['G',2,-1]).mutation_class(data_type='dig6')
# elliptic G
if n == 4:
data[ ('G',2,(1,3)) ] = ClusterQuiver(['G',2,(1,3)]).mutation_class(data_type='dig6')
data[ ('G',2,(1,1)) ] = ClusterQuiver(['G',2,(1,1)]).mutation_class(data_type='dig6')
data[ ('G',2,(3,3)) ] = ClusterQuiver(['G',2,(3,3)]).mutation_class(data_type='dig6')
# X
if n in [6,7]:
data[ ('X',n) ] = ClusterQuiver(['X',n]).mutation_class(data_type='dig6')
# elliptic F
if n == 6:
data[ ('F',4,(1,2)) ] = ClusterQuiver(['F',4,(1,2)]).mutation_class(data_type='dig6')
data[ ('F',4,(1,1)) ] = ClusterQuiver(['F',4,(1,1)]).mutation_class(data_type='dig6')
data[ ('F',4,(2,2)) ] = ClusterQuiver(['F',4,(2,2)]).mutation_class(data_type='dig6')
return data
def _save_data_dig6(n, types='ClassicalExceptional', verbose=False):
"""
    Save the mutation classes of rank ``n`` (classical and/or exceptional,
    depending on ``types``) as dig6 data into the file
    ``mutation_classes_n.dig6`` in the folder ``DOT_SAGE/cluster_algebra_quiver``.
TESTS::
sage: from sage.combinat.cluster_algebra_quiver.quiver_mutation_type import save_quiver_data
sage: save_quiver_data(2) # indirect doctest
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('A', 1)]
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('A', (1, 1), 1), ('A', 2), ('B', 2), ('BC', 1, 1), ('G', 2)]
sage: save_quiver_data(2,up_to=False) # indirect doctest
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('A', (1, 1), 1), ('A', 2), ('B', 2), ('BC', 1, 1), ('G', 2)]
sage: save_quiver_data(2,up_to=False, types='Classical') # indirect doctest
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('A', (1, 1), 1), ('A', 2), ('B', 2), ('BC', 1, 1)]
sage: save_quiver_data(2,up_to=False, types='Exceptional') # indirect doctest
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('G', 2)]
sage: save_quiver_data(2,up_to=False, verbose=False) # indirect doctest
"""
import os.path
from six.moves import cPickle
data = {}
possible_types = ['Classical', 'ClassicalExceptional', 'Exceptional']
if types not in possible_types:
raise ValueError('The third input must be either ClassicalExceptional'
' (default), Classical, or Exceptional.')
if types in possible_types[:2]:
data.update(_construct_classical_mutation_classes(n))
if types in possible_types[1:]:
data.update(_construct_exceptional_mutation_classes(n))
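    # the dictionary is pickled to
    # DOT_SAGE/cluster_algebra_quiver/mutation_classes_<n>.dig6; atomic_write
    # avoids leaving a partially written file if the dump is interrupted, and
    # the data is later read back by
    # sage.combinat.cluster_algebra_quiver.mutation_type.load_data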
from sage.env import DOT_SAGE
from sage.misc.misc import sage_makedirs
types_path = os.path.join(DOT_SAGE, 'cluster_algebra_quiver')
types_file = os.path.join(types_path,'mutation_classes_%s.dig6'%n)
sage_makedirs(types_path)
from sage.misc.temporary_file import atomic_write
with atomic_write(types_file, binary=True) as f:
cPickle.dump(data, f)
if verbose:
keys = sorted(data.keys(),key=str)
print("\nThe following types are saved to file", types_file,"and will now be used to determine quiver mutation types:")
print(keys)
def save_quiver_data(n, up_to=True, types='ClassicalExceptional', verbose=True):
r"""
    Save mutation classes of certain quivers of rank at most ``n``
    (or of rank exactly ``n`` when ``up_to`` is ``False``) to
    ``DOT_SAGE/cluster_algebra_quiver/mutation_classes_n.dig6``.
This data will then be used to determine quiver mutation types.
INPUT:
    - ``n`` -- the rank (or the upper limit on the rank) of the mutation
      classes that are being saved.
- ``up_to`` -- (default:``True``) if ``True``, saves data for
ranks smaller than or equal to ``n``. If ``False``, saves data
for rank exactly ``n``.
    - ``types`` -- (default: 'ClassicalExceptional') if 'ClassicalExceptional',
      saves data for both exceptional mutation-finite quivers and for
      classical quivers. The input 'Exceptional' or 'Classical' is also
      allowed to save only part of this data.
TESTS::
sage: from sage.combinat.cluster_algebra_quiver.quiver_mutation_type import save_quiver_data
sage: save_quiver_data(2)
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('A', 1)]
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('A', (1, 1), 1), ('A', 2), ('B', 2), ('BC', 1, 1), ('G', 2)]
sage: save_quiver_data(2,up_to=False)
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('A', (1, 1), 1), ('A', 2), ('B', 2), ('BC', 1, 1), ('G', 2)]
sage: save_quiver_data(2,up_to=False, types='Classical')
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('A', (1, 1), 1), ('A', 2), ('B', 2), ('BC', 1, 1)]
sage: save_quiver_data(2,up_to=False, types='Exceptional')
<BLANKLINE>
The following types are saved to file ... and will now be used to determine quiver mutation types:
[('G', 2)]
sage: save_quiver_data(2,up_to=False, verbose=False)
"""
from sage.combinat.cluster_algebra_quiver.mutation_type import load_data
if up_to is True:
ranks = range(1,n+1)
elif up_to is False:
ranks = [n]
for i in ranks:
_save_data_dig6(i,types=types,verbose=verbose)
# we finally clear the load_data
load_data.clear_cache()
def _bipartite_graph_to_digraph(g):
"""
Return a digraph obtained from a bipartite graph ``g`` by choosing one
set of the bipartition to be the set of sinks and the other to be the
set of sources.
EXAMPLES::
sage: from sage.combinat.cluster_algebra_quiver.quiver_mutation_type import _bipartite_graph_to_digraph
sage: G = Graph([(1,2)])
sage: _bipartite_graph_to_digraph(G)
Digraph on 2 vertices
"""
if not g.is_bipartite():
raise ValueError('The input graph is not bipartite.')
order = g.bipartite_sets()
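    # vertices in order[0] become sources and vertices in order[1] become sinks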
dg = DiGraph()
for edge in g.edges():
if edge[0] in order[0]:
dg.add_edge( edge[0], edge[1], edge[2] )
else:
dg.add_edge( edge[1], edge[0], edge[2] )
for vert in g.vertex_iterator():
if vert not in dg.vertices():
dg.add_vertex(vert)
return dg
def _is_mutation_type(data):
"""
Return ``True`` if ``data`` is a QuiverMutationType.
EXAMPLES::
sage: from sage.combinat.cluster_algebra_quiver.quiver_mutation_type import _is_mutation_type
sage: _is_mutation_type ( [ 'A', 2 ] )
True
sage: _is_mutation_type ( [ 'P', 1 ] )
False
"""
try:
QuiverMutationType(data)
return True
except Exception:
return False
def _mutation_type_error(data):
r"""
    Raise a ``ValueError`` because data which is not a valid quiver mutation
    type has been passed to ``QuiverMutationType``.
EXAMPLES::
sage: QuiverMutationType( 'Christian', 'Stump' ) # indirect doctest
Traceback (most recent call last):
...
ValueError: ['Christian', 'Stump'] is not a valid quiver mutation type
Finite types have the form [ '?', n ] for type ? and rank n
Affine type A has the form [ 'A', [ i, j ], 1 ] for rank i+j
Affine type ? has the form [ '?', k, \pm 1 ] for rank k+1
Elliptic type ? has the form [ '?', k, [i, j] ] (1 <= i,j <= 3) for rank k+2
For correct syntax in other types, please consult the documentation.
"""
if data[2] is None:
del data[2]
return_str = str(data) + ' is not a valid quiver mutation type'
return_str += '\n Finite types have the form [ \'?\', n ] for type ? and rank n'
return_str += '\n Affine type A has the form [ \'A\', [ i, j ], 1 ] for rank i+j'
return_str += '\n Affine type ? has the form [ \'?\', k, \\pm 1 ] for rank k+1'
return_str += '\n Elliptic type ? has the form [ \'?\', k, [i, j] ] (1 <= i,j <= 3) for rank k+2'
return_str += '\n For correct syntax in other types, please consult the documentation.'
raise ValueError(return_str)
def _edge_list_to_matrix(edges, nlist, mlist):
r"""
Return the matrix obtained from the edge list of a quiver.
INPUT:
- ``edges`` -- the list of edges
- ``nlist`` -- the list of mutable vertices of the quiver
- ``mlist`` -- the list of frozen vertices of the quiver
OUTPUT:
An `(n+m) \times n` matrix corresponding to the edge-list.
EXAMPLES::
sage: from sage.combinat.cluster_algebra_quiver.quiver_mutation_type import _edge_list_to_matrix
sage: G = QuiverMutationType(['A',2])._digraph
sage: _edge_list_to_matrix(G.edges(), [0,1], [])
[ 0 1]
[-1 0]
sage: G2 = DiGraph([('a', 'b', 1)])
sage: _edge_list_to_matrix(G2.edges(), ['a', 'b'], [])
[ 0 1]
[-1 0]
sage: G3 = DiGraph([('a', 'b', 1), ('b', 'c', 1)])
sage: _edge_list_to_matrix(G3.edges(), ['a', 'b'], ['c'])
[ 0 1]
[-1 0]
[ 0 -1]
"""
n = len(nlist)
m = len(mlist)
nmlist = nlist + mlist
M = matrix(ZZ, n + m, n, sparse=True)
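    # normalize each edge label to a pair (a,b): no label means (1,-1) and an
    # integer label c means (c,-c); an arrow v1 -> v2 labelled (a,b) then
    # contributes M[v2,v1] = b and M[v1,v2] = a, where only mutable vertices
    # index columns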
for edge in edges:
if edge[2] is None:
edge = (edge[0], edge[1], (1, -1))
elif edge[2] in ZZ:
edge = (edge[0], edge[1], (edge[2], -edge[2]))
v1, v2, (a, b) = edge
if v1 in nlist:
M[nmlist.index(v2), nmlist.index(v1)] = b
if v2 in nlist:
M[nmlist.index(v1), nmlist.index(v2)] = a
return M
| 37.158088
| 161
| 0.486725
|
from __future__ import division, print_function
from __future__ import absolute_import
from six.moves import range
from sage.structure.sage_object import SageObject
from copy import copy
from sage.structure.unique_representation import UniqueRepresentation
from sage.misc.all import cached_method
from sage.rings.all import ZZ, infinity
from sage.graphs.all import Graph, DiGraph
from sage.arith.all import binomial, Euler_Phi
from sage.all import prod
from sage.matrix.all import matrix
class QuiverMutationTypeFactory(SageObject):
def __call__(self, *args):
if len( args ) == 1:
data = args[0]
else:
data = args
if isinstance(data, QuiverMutationType_Irreducible):
return data
elif isinstance(data, QuiverMutationType_Reducible):
return data
if isinstance(data, tuple) and len( data ) > 0:
pass
elif isinstance(data, list) and len( data ) > 0:
data = tuple( data )
else:
_mutation_type_error( data )
if all( type( data_component ) in [list,tuple,QuiverMutationType_Irreducible] for data_component in data ):
if len( data ) == 1: return QuiverMutationType( data[0] )
else:
data = tuple( QuiverMutationType(comp) for comp in data )
return QuiverMutationType_Reducible( *data )
if len(data) == 2: data = (data[0],data[1],None)
elif len(data) == 3: pass
else: _mutation_type_error(data)
if isinstance(data[2], list): data = (data[0],data[1],tuple(data[2]))
if isinstance(data[1], list): data = (data[0],tuple(data[1]),data[2])
if True:
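            # canonicalize redundant names for the same mutation type, e.g.
            # D_2 = A_1 x A_1, D_3 = A_3, C_2 = B_2, E_9 = affine E_8, Kac's
            # notation for twisted affine types, and the small Grassmannian,
            # triangle, T- and R2-types that coincide with Dynkin types
            # (for instance QuiverMutationType('D',3) and
            # QuiverMutationType('A',3) yield the same object)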
if data == ('D',2,None):
return QuiverMutationType( ('A',1,None), ('A',1,None) )
elif data == ('D',3,None):
data = ('A',3,None)
elif data == ('C',2,None):
data = ('B',2,None)
elif data == ('E',9,None):
data = ('E',8,1)
elif data[0] == 'A' and data[2] == 1 and isinstance(data[1], tuple) and len(data[1]) == 2 and min(data[1]) == 0:
if max(data[1]) == 0:
pass
elif max(data[1]) == 1:
data = ('A', 1,None)
elif max(data[1]) == 2:
return QuiverMutationType( ('A',1,None), ('A',1,None) )
elif max(data[1]) == 3:
data = ('A',3,None)
else:
data = ('D',max(data[1]),None)
elif data[0] == 'GR' and data[2] is None and isinstance(data[1], tuple) and len(data[1]) == 2 and data[1][1] > data[1][0]:
if min(data[1]) > max(data[1])/2 and max(data[1]) != min(data[1])+1:
data = (data[0],(max(data[1])-min(data[1]),max(data[1])),data[2])
if min(data[1]) == 2 and max(data[1]) > 3:
data = ('A',max(data[1])-3,None)
elif data[1] == (3,6):
data = ('D',4,None)
elif data[1] == (3,7):
data = ('E',6,None)
elif data[1] == (3,8):
data = ('E',8,None)
elif data[1] == (3,9):
data = ('E',8,[1,1])
elif data[1] == (4,8):
data = ('E',7,[1,1])
elif data == ('TR',1,None):
data = ('A',1,None)
elif data == ('TR',2,None):
data = ('A',3,None)
elif data == ('TR',3,None):
data = ('D',6,None)
elif data == ('TR',4,None):
data = ('E',8,(1,1))
elif data == ('A',1,1):
data = ('A',(1,1),1)
elif data[0] == 'B' and data[2] == 1:
if data[1] == 2:
data = ('CC',2,1)
elif data[1] > 2:
data = ('BD',data[1],1)
elif data[0] == 'B' and data[2] == -1:
if data[1] == 2:
data = ('BB',2,1)
elif data[1] > 2:
data= ('CD',data[1],1)
elif data[0] == 'C' and data[1] > 1 and data[2] == 1:
data = ('CC',data[1],1)
elif data[0] == 'C' and data[1] > 1 and data[2] == -1:
data = ('BB',data[1],1)
elif data == ('A',2,2):
data = ('BC',1,1)
elif data[0] == 'A' and data[1] in ZZ and data[1] > 1 and data[1]%2 == 0 and data[2] == 2:
data = ('BC',data[1]//2,1)
elif data[0] == 'A' and data[1] in ZZ and data[1] > 3 and data[1]%2 == 1 and data[2] == 2:
data = ('CD',(data[1]+1)//2,1)
elif data == ('A',3,2):
data = ('BB',2,1)
elif data[0] == 'D' and data[1] in ZZ and data[1] > 2 and data[2] == 2:
data = ('BB',data[1]-1,1)
elif data == ('E',6,2):
data = ('F',4,-1)
elif data == ('D',4,3):
data = ('G',2,-1)
elif data == ('F',4,(2,1)):
data = ('F',4,(1,2))
elif data == ('G',2,(3,1)):
data = ('G',2,(1,3))
elif data[0] == 'T' and data[2] is None:
data = (data[0],tuple(sorted(data[1])),data[2])
r,p,q = data[1]
if r == 1:
data = ('A',p+q-1,None)
elif r == p == 2:
data = ('D',q+2,None)
elif r == 2 and p == 3:
if q in (3,4,5): data = ('E',q+3,None)
elif q == 6: data = ('E',8,1)
else: data = ('E',q+3,None)
elif r== 2 and p == q == 4:
data = ('E',7,1)
elif r == p == q == 3:
data = ('E',6,1)
elif data[0] == 'R2' and data[2] is None and all(data[1][i] in ZZ and data[1][i] > 0 for i in [0,1]):
data = (data[0],tuple(sorted(data[1])),data[2])
b,c = data[1]
if data[1] == (1,1):
data = ('A',2,None)
elif data[1] == (1,2):
data = ('B',2,None)
elif data[1] == (1,3):
data = ('G',2,None)
elif data[1] == (1,4):
data = ('BC',1,1)
elif data[1] == (2,2):
data = ('A',(1,1),1)
letter,rank,twist = data
if not isinstance(letter, str):
_mutation_type_error(data)
if isinstance(rank, list):
rank = tuple(rank)
if isinstance(twist, list):
twist = tuple(twist)
return QuiverMutationType_Irreducible(letter,rank,twist)
def _repr_(self):
return "QuiverMutationType"
def samples(self, finite=None, affine=None, elliptic=None,
mutation_finite=None):
result = self._samples()
if finite is not None:
result = [t for t in result if t.is_finite() == finite]
if affine is not None:
result = [t for t in result if t.is_affine() == affine]
if elliptic is not None:
result = [t for t in result if t.is_elliptic() == elliptic]
if mutation_finite is not None:
result = [t for t in result
if t.is_mutation_finite() == mutation_finite]
return result
@cached_method
def _samples(self):
finite_types = \
[QuiverMutationType(t) for t in [['A', 1], ['A', 5], ['B', 2], ['B', 5],
['C', 3], ['C', 5], ['D', 2], ['D', 5],
["E", 6], ["E", 7], ["E", 8], ["F", 4],
["G", 2]]]
affine_types = \
[QuiverMutationType(t) for t in [['A', [1,1], 1], ['A', [4,5], 1], ['D', 4, 1], ['BB', 5, 1]]]
elliptic_types = \
[QuiverMutationType(t) for t in [['E', 6, [1,1]], ['E', 7, [1,1]]]]
mutation_finite_types = \
[QuiverMutationType(t) for t in [['R2',(1,5)], ['R2',(3,5)]]]
mutation_infinite_types = \
[QuiverMutationType(t) for t in [['E',10], ['BE',5], ['GR',(3,10)], ['T',(3,3,4)]]]
return finite_types + affine_types + elliptic_types + mutation_finite_types + mutation_infinite_types
QuiverMutationType = QuiverMutationTypeFactory()
QuiverMutationType.__doc__ = \
r"""
*Quiver mutation types* can be seen as a slight generalization of
*generalized Cartan types*.
Background on generalized Cartan types can be found at
:wikipedia:`Generalized_Cartan_matrix`
For the compendium on the cluster algebra and quiver package in Sage see [MS2011]_
A `B`-matrix is a skew-symmetrizable `( n \times n )`-matrix `M`.
I.e., there exists an invertible diagonal matrix `D` such that `DM` is
skew-symmetric. `M` can be encoded as a *quiver* by having a directed
edge from vertex `i` to vertex `j` with label `(a,b)` if `a = M_{i,j}
> 0` and `b = M_{j,i} < 0`. We consider quivers up to *mutation
equivalence*.
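    For instance, the skew-symmetrizable matrix `M` with `M_{0,1} = 1`,
    `M_{1,0} = -2` and zeros on the diagonal is encoded by the quiver with a
    single directed edge from vertex `0` to vertex `1` labelled `(1,-2)`.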
To a quiver mutation type we can associate a *generalized Cartan type*
by sending `M` to the generalized Cartan matrix `C(M)` obtained by
replacing all positive entries by their negatives and adding `2`'s on
the main diagonal.
``QuiverMutationType`` constructs a quiver mutation type object. For
more detail on the possible different types, please see the
compendium.
INPUT:
The input consists either of a quiver mutation type, or of a
``letter`` (a string), a ``rank`` (one integer or a list/tuple of
integers), and an optional ``twist`` (an integer or a list of
integers). There are several different naming conventions for quiver
mutation types.
- Finite type -- ``letter`` is a Dynkin type (A-G), and ``rank`` is
the rank.
- Affine type -- there is more than one convention for naming affine
types.
* Kac's notation: ``letter`` is a Dynkin type, ``rank`` is the
rank of the associated finite Dynkin diagram, and ``twist`` is the
twist, which could be 1, 2, or 3. In the special case of affine
type A, there is more than one quiver mutation type associated to
the Cartan type. In this case only, ``rank`` is a pair of integers
(i,j), giving the number of edges pointing clockwise and the number
of edges pointing counter-clockwise. The total number of vertices
is given by i+j in this case.
* Naive notation: ``letter`` is one of 'BB', 'BC', 'BD', 'CC',
'CD'. The name specifies the two ends of the diagram, which are
joined by a path. The total number of vertices is given by
``rank +1`` (to match the indexing people expect because these
are affine types). In general, ``rank`` must be large enough
      for the picture to make sense, but we also accept ``letter``
      equal to ``BC`` with ``rank=1``.
* Macdonald notation: for the dual of an untwisted affine type
(such as ['C', 6,1]), we accept a twist of -1 (i.e.,
['C',6,-1]).
- Elliptic type -- ``letter`` is a Dynkin type, ``rank`` is the rank
of the finite Dynkin diagram, and ``twist`` is a tuple of two
integers. We follow Saito's notation.
- Other shapes:
* Rank 2: ``letter`` is 'R2', and ``rank`` is a pair of integers
specifying the label on the unique edge.
* Triangle: ``letter`` is ``TR``, and ``rank`` is the number of
vertices along a side.
* T: This defines a quiver shaped like a T. ``letter`` is 'T',
and the ``rank`` is a triple, whose entries specify the number
of vertices along each path from the branch point (counting the
branch point).
* Grassmannian: This defines the cluster algebra (without
coefficients) corresponding to the cluster algebra with
coefficients which is the co-ordinate ring of a Grassmannian.
``letter`` is 'GR'. ``rank`` is a pair of integers (`k`, `n`)
      with `k < n` specifying the Grassmannian of `k`-planes in
      `n`-space. This defines a quiver given by a `(k-1) \times (n-k-1)`
      grid where each square is cyclically oriented.
* Exceptional mutation finite quivers: The two exceptional
mutation finite quivers, found by Derksen-Owen, have ``letter``
as 'X' and ``rank`` 6 or 7, equal to the number of vertices.
* AE, BE, CE, DE: Quivers are built of one end which looks like
type (affine A), B, C, or D, and the other end which looks like
type E (i.e., it consists of two antennae, one of length one,
and one of length two). ``letter`` is 'AE', 'BE', 'CE', or
'DE', and ``rank`` is the total number of vertices. Note that
'AE' is of a slightly different form and requires ``rank`` to be
a pair of integers (i,j) just as in the case of affine type A.
See Exercise 4.3 in Kac's book Infinite Dimensional Lie Algebras
for more details.
* Infinite type E: It is also possible to obtain infinite-type E
quivers by specifying ``letter`` as 'E' and ``rank`` as the
number of vertices.
REFERENCES:
- A good reference for finite and affine Dynkin diagrams, including
Kac's notation, is the :wikipedia:`Dynkin_diagram`.
- A good reference for the skew-symmetrizable elliptic diagrams is
"Cluster algebras of finite mutation type via unfolding" by
A. Felikson, M. Shapiro, and P. Tumarkin, [FST2012]_.
EXAMPLES:
Finite types::
sage: QuiverMutationType('A',1)
['A', 1]
sage: QuiverMutationType('A',5)
['A', 5]
sage: QuiverMutationType('B',2)
['B', 2]
sage: QuiverMutationType('B',5)
['B', 5]
sage: QuiverMutationType('C',2)
['B', 2]
sage: QuiverMutationType('C',5)
['C', 5]
sage: QuiverMutationType('D',2)
[ ['A', 1], ['A', 1] ]
sage: QuiverMutationType('D',3)
['A', 3]
sage: QuiverMutationType('D',4)
['D', 4]
sage: QuiverMutationType('E',6)
['E', 6]
sage: QuiverMutationType('G',2)
['G', 2]
sage: QuiverMutationType('A',(1,0),1)
['A', 1]
sage: QuiverMutationType('A',(2,0),1)
[ ['A', 1], ['A', 1] ]
sage: QuiverMutationType('A',(7,0),1)
['D', 7]
Affine types::
sage: QuiverMutationType('A',(1,1),1)
['A', [1, 1], 1]
sage: QuiverMutationType('A',(2,4),1)
['A', [2, 4], 1]
sage: QuiverMutationType('BB',2,1)
['BB', 2, 1]
sage: QuiverMutationType('BB',4,1)
['BB', 4, 1]
sage: QuiverMutationType('CC',2,1)
['CC', 2, 1]
sage: QuiverMutationType('CC',4,1)
['CC', 4, 1]
sage: QuiverMutationType('BC',1,1)
['BC', 1, 1]
sage: QuiverMutationType('BC',5,1)
['BC', 5, 1]
sage: QuiverMutationType('BD',3,1)
['BD', 3, 1]
sage: QuiverMutationType('BD',5,1)
['BD', 5, 1]
sage: QuiverMutationType('CD',3,1)
['CD', 3, 1]
sage: QuiverMutationType('CD',5,1)
['CD', 5, 1]
sage: QuiverMutationType('D',4,1)
['D', 4, 1]
sage: QuiverMutationType('D',6,1)
['D', 6, 1]
sage: QuiverMutationType('E',6,1)
['E', 6, 1]
sage: QuiverMutationType('E',7,1)
['E', 7, 1]
sage: QuiverMutationType('E',8,1)
['E', 8, 1]
sage: QuiverMutationType('F',4,1)
['F', 4, 1]
sage: QuiverMutationType('F',4,-1)
['F', 4, -1]
sage: QuiverMutationType('G',2,1)
['G', 2, 1]
sage: QuiverMutationType('G',2,-1)
['G', 2, -1]
sage: QuiverMutationType('A',3,2) == QuiverMutationType('D',3,2)
True
Affine types using Kac's Notation::
sage: QuiverMutationType('A',1,1)
['A', [1, 1], 1]
sage: QuiverMutationType('B',5,1)
['BD', 5, 1]
sage: QuiverMutationType('C',5,1)
['CC', 5, 1]
sage: QuiverMutationType('A',2,2)
['BC', 1, 1]
sage: QuiverMutationType('A',7,2)
['CD', 4, 1]
sage: QuiverMutationType('A',8,2)
['BC', 4, 1]
sage: QuiverMutationType('D',6,2)
['BB', 5, 1]
sage: QuiverMutationType('E',6,2)
['F', 4, -1]
sage: QuiverMutationType('D',4,3)
['G', 2, -1]
Elliptic types::
sage: QuiverMutationType('E',6,[1,1])
['E', 6, [1, 1]]
sage: QuiverMutationType('F',4,[2,1])
['F', 4, [1, 2]]
sage: QuiverMutationType('G',2,[3,3])
['G', 2, [3, 3]]
Mutation finite types:
rank 2 cases::
sage: QuiverMutationType('R2',(1,1))
['A', 2]
sage: QuiverMutationType('R2',(1,2))
['B', 2]
sage: QuiverMutationType('R2',(1,3))
['G', 2]
sage: QuiverMutationType('R2',(1,4))
['BC', 1, 1]
sage: QuiverMutationType('R2',(1,5))
['R2', [1, 5]]
sage: QuiverMutationType('R2',(2,2))
['A', [1, 1], 1]
sage: QuiverMutationType('R2',(3,5))
['R2', [3, 5]]
Exceptional Derksen-Owen quivers::
sage: QuiverMutationType('X',6)
['X', 6]
(Mainly) mutation infinite types:
Infinite type E::
sage: QuiverMutationType('E',9)
['E', 8, 1]
sage: QuiverMutationType('E',10)
['E', 10]
sage: QuiverMutationType('E',12)
['E', 12]
sage: QuiverMutationType('AE',(2,3))
['AE', [2, 3]]
sage: QuiverMutationType('BE',5)
['BE', 5]
sage: QuiverMutationType('CE',5)
['CE', 5]
sage: QuiverMutationType('DE',6)
['DE', 6]
Grassmannian types::
sage: QuiverMutationType('GR',(2,4))
['A', 1]
sage: QuiverMutationType('GR',(2,6))
['A', 3]
sage: QuiverMutationType('GR',(3,6))
['D', 4]
sage: QuiverMutationType('GR',(3,7))
['E', 6]
sage: QuiverMutationType('GR',(3,8))
['E', 8]
sage: QuiverMutationType('GR',(3,10))
['GR', [3, 10]]
Triangular types::
sage: QuiverMutationType('TR',2)
['A', 3]
sage: QuiverMutationType('TR',3)
['D', 6]
sage: QuiverMutationType('TR',4)
['E', 8, [1, 1]]
sage: QuiverMutationType('TR',5)
['TR', 5]
T types::
sage: QuiverMutationType('T',(1,1,1))
['A', 1]
sage: QuiverMutationType('T',(1,1,4))
['A', 4]
sage: QuiverMutationType('T',(1,4,4))
['A', 7]
sage: QuiverMutationType('T',(2,2,2))
['D', 4]
sage: QuiverMutationType('T',(2,2,4))
['D', 6]
sage: QuiverMutationType('T',(2,3,3))
['E', 6]
sage: QuiverMutationType('T',(2,3,4))
['E', 7]
sage: QuiverMutationType('T',(2,3,5))
['E', 8]
sage: QuiverMutationType('T',(2,3,6))
['E', 8, 1]
sage: QuiverMutationType('T',(2,3,7))
['E', 10]
sage: QuiverMutationType('T',(3,3,3))
['E', 6, 1]
sage: QuiverMutationType('T',(3,3,4))
['T', [3, 3, 4]]
Reducible types::
sage: QuiverMutationType(['A',3],['B',4])
[ ['A', 3], ['B', 4] ]
"""
class QuiverMutationType_abstract(UniqueRepresentation, SageObject):
def _repr_(self):
return self._description
def plot(self, circular=False, directed=True):
return self.standard_quiver().plot(circular=circular, directed=directed)
def show(self, circular=False, directed=True):
self.plot( circular=circular, directed=directed ).show()
def letter(self):
return self._letter
def rank(self):
return self._rank
@cached_method
def b_matrix(self):
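        # the exchange matrix of the standard quiver, read off from the
        # labelled edges of the underlying digraph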
return _edge_list_to_matrix(self._digraph.edges(), list(range(self._rank)), [])
@cached_method
def standard_quiver(self):
from .quiver import ClusterQuiver
Q = ClusterQuiver(self._digraph)
Q._mutation_type = self
return Q
@cached_method
def cartan_matrix(self):
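        # obtained from the B-matrix by replacing every positive entry with
        # its negative and putting 2's on the main diagonal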
cmat = copy(self.b_matrix())
for i,j in cmat.nonzero_positions():
a = cmat[i,j]
if a > 0: cmat[i,j] = -a
for i in range(self._rank):
cmat[i,i] = 2
return cmat
def is_irreducible(self):
return self._info['irreducible']
def is_mutation_finite(self):
return self._info['mutation_finite']
def is_simply_laced(self):
return self._info['simply_laced']
def is_skew_symmetric(self):
return self._info['skew_symmetric']
def is_finite(self):
return self._info['finite']
def is_affine(self):
if self.is_irreducible():
return self._info['affine']
else:
return False
def is_elliptic(self):
if self.is_irreducible():
return self._info['elliptic']
else:
return False
def properties(self):
txt = '{} has rank {} and the following properties:'
print(txt.format(self, self.rank()))
s = "\t- {} {}"
print(s.format('irreducible: ', self.is_irreducible()))
print(s.format('mutation finite: ', self.is_mutation_finite()))
print(s.format('simply-laced: ', self.is_simply_laced()))
print(s.format('skew-symmetric: ', self.is_skew_symmetric()))
print(s.format('finite: ', self.is_finite()))
if self.is_irreducible():
print(s.format('affine: ', self.is_affine()))
print(s.format('elliptic: ', self.is_elliptic()))
class QuiverMutationType_Irreducible(QuiverMutationType_abstract):
def __init__(self, letter, rank, twist=None):
self._rank = None
self._bi_rank = None
self._graph = Graph()
self._digraph = DiGraph()
self._info = {}
self._info['irreducible'] = True
self._info['mutation_finite'] = False
self._info['simply_laced'] = False
self._info['skew_symmetric'] = False
self._info['finite'] = False
self._info['affine'] = False
self._info['elliptic'] = False
self._info['irreducible_components'] = False
if isinstance(rank, tuple):
rank = list(rank)
if isinstance(twist, tuple):
twist = list(twist)
self._letter = letter
self._twist = twist
data = [letter,rank,twist]
if letter == 'A':
if twist is None and rank in ZZ and rank > 0:
self._rank = rank
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['finite'] = True
elif twist==1 and isinstance(rank, list) and len(rank) == 2 and all( rank[i] in ZZ and rank[i] >= 0 for i in [0,1] ) and rank != [0,0]:
if isinstance(rank, tuple):
rank = list( rank )
data[1] = rank
rank = sorted(rank)
self._bi_rank = rank
self._rank = sum( self._bi_rank )
self._info['mutation_finite'] = True
if self._rank > 2: self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
if rank[0] > 0:
self._info['affine'] = True
elif rank[0] == 0:
self._info['finite'] = True
else:
_mutation_type_error( data )
if twist is None and self._rank == 1 or twist == 1 and self._rank == 1:
self._graph.add_vertex( 0 )
elif twist == 1 and self._bi_rank[0] == 1 and self._bi_rank[1] == 1:
self._graph.add_edge( 0,1,2 )
else:
for i in range( self._rank - 1 ):
self._graph.add_edge( i, i+1, 1 )
if twist == 1:
self._digraph.add_edge( self._rank - 1, 0, 1 )
for i in range( self._rank - 1 ):
if i < ( 2 * self._bi_rank[0] ) and i%2 == 0:
self._digraph.add_edge( i+1, i, 1 )
else:
self._digraph.add_edge( i, i+1, 1 )
elif letter == 'B':
if twist is None and rank in ZZ and rank > 1:
self._rank = rank
self._info['mutation_finite'] = True
self._info['finite'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (1,-2) )
else:
self._graph.add_edge( rank-2, rank-1, (2,-1) )
elif letter == 'C':
if twist is None and rank in ZZ and rank > 1:
self._rank = rank
self._info['mutation_finite'] = True
self._info['finite'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (2,-1) )
else:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
elif letter == 'BB':
if twist == 1 and rank in ZZ and rank > 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if rank % 2 == 0:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
else:
self._graph.add_edge( rank-2, rank-1, (2,-1) )
self._graph.add_edge( rank, 0 , (1,-2) )
elif letter == 'CC':
if twist == 1 and rank in ZZ and rank > 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if rank % 2 == 0:
self._graph.add_edge( rank-2, rank-1, (2,-1) )
else:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
self._graph.add_edge( rank, 0 , (2,-1) )
elif letter == 'BC':
if twist == 1 and rank in ZZ and rank >= 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
if rank == 1:
self._graph.add_edge( 0,1,(1,-4) )
else:
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (2,-1) )
else:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
if twist == 1:
self._graph.add_edge( rank, 0 , (1,-2) )
elif letter == 'BD':
if twist == 1 and rank in ZZ and rank > 2:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (1,-2) )
else:
self._graph.add_edge( rank-2, rank-1, (2,-1) )
if twist == 1:
self._graph.add_edge( rank, 1 , 1 )
elif letter == 'CD':
if twist == 1 and rank in ZZ and rank > 2:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
if (rank % 2 == 0):
self._graph.add_edge( rank-2, rank-1, (2,-1) )
else:
self._graph.add_edge( rank-2, rank-1, (1,-2) )
if twist == 1:
self._graph.add_edge( rank, 1 , 1 )
elif letter == 'D':
if rank in ZZ and rank > 3 and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['finite'] = True
elif twist == 1 and rank in ZZ and rank > 3:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['affine'] = True
else:
_mutation_type_error( data )
for i in range( rank - 2 ):
self._graph.add_edge( i, i+1, 1 )
self._graph.add_edge( rank-3, rank-1, 1 )
if twist is not None:
self._graph.add_edge( rank, 1 ,1 )
elif letter == 'E':
if rank in [6,7,8] and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['finite'] = True
if rank == 6:
self._graph.add_edges( [ (0,1),(1,2),(2,3),(3,4),(2,5) ] )
elif rank == 7:
self._graph.add_edges([(0, 1), (1, 2), (2, 3),
(3, 4), (4, 5), (2, 6)])
elif rank == 8:
self._graph.add_edges([(0, 1), (1, 2), (2, 3),
(3, 4), (4, 5), (5, 6),(2, 7)])
elif rank in [6,7,8] and twist == 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._info['affine'] = True
if rank == 6:
self._graph.add_edges( [ (0,1),(1,2),(2,3),(3,4),(2,5),(5,6) ] )
elif rank == 7:
self._graph.add_edges( [ (0,1),(1,2),(2,3),(3,4),(4,5),(5,6),(3,7) ] )
elif rank == 8:
self._graph.add_edges( [ (0,1),(1,2),(2,3),(3,4),(4,5),(5,6),(6,7),(2,8) ] )
elif rank in [6,7,8] and twist == [1,1]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['skew_symmetric'] = True
self._info['elliptic'] = True
if rank == 6:
self._digraph.add_edges( [ (0,1,1),(1,2,1),(3,2,1),(3,4,1),(5,6,1),(6,7,1),(5,1,1),(2,5,2),(5,3,1),(6,2,1) ] )
elif rank == 7:
self._digraph.add_edges( [ (1,0,1),(1,2,1),(2,3,1),(4,3,1),(4,5,1),(6,5,1),(7,8,1),(3,7,2),(7,2,1),(7,4,1),(8,3,1) ] )
elif rank == 8:
self._digraph.add_edges( [ (0,1,1),(1,9,1),(3,9,1),(3,4,1),(2,8,1),(2,1,1),(9,2,2),(2,3,1),(8,9,1),(5,4,1),(5,6,1),(7,6,1) ] )
elif rank > 9 and twist is None:
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
self._rank = rank
for i in range(rank-2):
self._graph.add_edge( i, i+1, 1 )
self._graph.add_edge( 2, rank-1 )
else:
_mutation_type_error(data)
elif letter == 'AE':
if isinstance(rank, list) and len(rank) == 2 and all( rank[i] in ZZ and rank[i] > 0 for i in [0,1] ) and twist is None:
if isinstance(rank, tuple):
rank = list( rank )
data[1] = rank
rank = sorted(rank)
self._bi_rank = rank
self._rank = sum( self._bi_rank ) + 1
if self._rank > 3: self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
if self._bi_rank == [1,1]:
self._graph.add_edges( [(0,1,2),(1,2,None)] )
else:
self._digraph.add_edge( self._rank - 2, 0 )
for i in range(self._rank-2):
if i < ( 2 * self._bi_rank[0] ) and i%2 == 0:
self._digraph.add_edge(i+1,i)
else:
self._digraph.add_edge(i,i+1)
self._digraph.add_edge(self._rank-2,self._rank-1)
else:
_mutation_type_error( data )
elif letter == 'BE':
if rank >4 and twist is None:
self._rank = rank
for i in range(rank-3):
self._graph.add_edge( i, i+1 )
self._graph.add_edge( 2, rank-1 )
if rank%2 == 0:
self._graph.add_edge( rank-3,rank-2,(2,-1) )
else:
self._graph.add_edge( rank-3,rank-2,(1,-2) )
else:
_mutation_type_error( data )
elif letter == 'CE':
if rank >4 and twist is None:
self._rank = rank
for i in range(rank-3):
self._graph.add_edge( i, i+1 )
self._graph.add_edge( 2, rank-1 )
if rank%2 == 0:
self._graph.add_edge( rank-3,rank-2,(1,-2) )
else:
self._graph.add_edge( rank-3,rank-2,(2,-1) )
else:
_mutation_type_error( data )
elif letter == 'DE':
if rank >5 and twist is None:
self._rank = rank
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
for i in range(rank-3):
self._graph.add_edge( i, i+1 )
self._graph.add_edge( 2, rank-2 )
self._graph.add_edge( rank-4, rank-1 )
else:
_mutation_type_error( data )
elif letter == 'F':
if rank == 4 and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['finite'] = True
self._graph.add_edges( [ (0,1,None),(1,2,(2,-1)),(2,3,None) ] )
elif rank == 4 and twist == 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
self._graph.add_edges( [ (0,1,None), (1,2,None),
(2,3,(1,-2)),(3,4,None) ] )
elif rank == 4 and twist == -1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
self._graph.add_edges( [ (0,1,None), (1,2,None),
(2,3,(2,-1)),(3,4,None) ] )
elif rank == 4 and (twist == [1,2]):
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,None),
(2,3,(2,-1)), (4,2,(1,-2)),
(3,4,2), (4,5,None), (5,3,None) ])
elif rank == 4 and (twist == [2,1]):
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,None),
(2,3,(1,-2)), (4,2,(2,-1)),
(3,4,2), (4,5,None), (5,3,None) ])
elif rank == 4 and twist == [2,2]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,None),
(3,1,None), (2,3,2),
(4,2,(2,-1)), (3,4,(1,-2)),
(5,4,None) ] )
elif rank == 4 and twist == [1,1]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,None),
(3,1,None), (2,3,2), (4,2,(1,-2)),
(3,4,(2,-1)), (5,4,None) ] )
else:
_mutation_type_error( data )
elif letter == 'G':
if rank == 2 and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['finite'] = True
self._graph.add_edges( [ (0,1,(1,-3)) ] )
elif rank == 2 and twist == -1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
self._graph.add_edges( [ (0,1,None),(1,2,(1,-3)) ] )
elif rank == 2 and twist == 1:
self._rank = rank + 1
self._info['mutation_finite'] = True
self._info['affine'] = True
self._graph.add_edges( [ (0,1,None),(1,2,(3,-1)) ] )
elif rank == 2 and (twist == [1,3]):
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,(3,-1)),
(3,1,(1,-3)), (2,3,2)] )
elif rank == 2 and (twist == [3,1]):
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (0,1,None), (1,2,(1,-3)),
(3,1,(3,-1)), (2,3,2)] )
elif rank == 2 and twist == [3,3]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (1,0,None), (0,2,2), (3,0,(3,-1)),
(2,1,None), (2,3, (1,-3))])
elif rank == 2 and twist == [1,1]:
self._rank = rank + 2
self._info['mutation_finite'] = True
self._info['elliptic'] = True
self._digraph.add_edges( [ (1,0,None), (0,2,2), (3,0,(1,-3)),
(2,1,None), (2,3,(3,-1)) ] )
else:
_mutation_type_error( data )
elif letter == 'GR':
if twist is None and isinstance(rank, list) and len(rank) == 2 and all( rank[i] in ZZ and rank[i] > 0 for i in [0,1] ) and rank[1] - 1 > rank[0] > 1:
gr_rank = (rank[0]-1,rank[1]-rank[0]-1)
self._rank = prod(gr_rank)
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
a,b = gr_rank
for i in range(a):
for j in range(b):
if i < a-1:
if (i+j) % 2 == 0:
self._digraph.add_edge(i*b+j,(i+1)*b+j)
else:
self._digraph.add_edge((i+1)*b+j,i*b+j)
if j < b-1:
if (i+j) % 2 == 0:
self._digraph.add_edge(i*b+j+1,i*b+j)
else:
self._digraph.add_edge(i*b+j,i*b+j+1)
else:
_mutation_type_error( data )
elif letter == 'R2':
if twist is None and isinstance(rank, list) and len(rank) == 2 and all( rank[i] in ZZ and rank[i] > 0 for i in [0,1] ):
rank = sorted(rank)
b,c = rank
self._rank = 2
if b == c: self._info['skew_symmetric'] = True
self._graph.add_edge(0,1,(b,-c))
else:
_mutation_type_error( data )
elif letter == 'T':
if twist is None and isinstance(rank, list) and len(rank) == 3 and all( rank[i] in ZZ and rank[i] > 0 for i in [0,1,2] ):
if isinstance(rank, tuple):
rank = list( rank )
data[1] = rank
rank = sorted( rank )
self._rank = sum( rank ) - 2
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
r,p,q = rank
for i in range(q-1):
if i == 0:
self._graph.add_edge(0,1)
self._graph.add_edge(0,r)
self._graph.add_edge(0,r+p-1)
else:
if i < r-1:
self._graph.add_edge(i,i+1)
if i < p-1:
self._graph.add_edge(i+r-1,i+r)
self._graph.add_edge(i+r+p-2,i+r+p-1)
else:
_mutation_type_error( data )
elif letter == 'TR':
if twist is None and rank == 1:
self._graph.add_vertex( 0 )
elif twist is None and rank > 1:
self._rank = rank*(rank+1)//2
self._info['simply_laced'] = True
self._info['skew_symmetric'] = True
level = 0
while level < rank:
nr = rank*level-sum(range(level))
for i in range(nr,nr+rank-level-1):
self._digraph.add_edge(i,i+1)
self._digraph.add_edge(i+rank-level,i)
self._digraph.add_edge(i+1,i+rank-level)
level += 1
else:
_mutation_type_error( data )
elif letter == 'X':
if rank in [6,7] and twist is None:
self._rank = rank
self._info['mutation_finite'] = True
self._info['skew_symmetric'] = True
self._digraph.add_edges( [ (0,1,2),(1,2,None),(2,0,None),
(2,3,None),(3,4,2),(4,2,None),
(2,5,None) ] )
if rank == 7:
self._digraph.add_edges( [ (5,6,2),(6,2,None) ] )
else:
_mutation_type_error( data )
else:
_mutation_type_error( data )
if not self._digraph:
if self._graph.is_bipartite():
self._digraph = _bipartite_graph_to_digraph( self._graph )
else:
raise ValueError('The QuiverMutationType does not have '
'a Coxeter diagram.')
if not self._graph:
self._graph = self._digraph.to_undirected()
if twist: self._description = str( [letter,rank,twist] )
else: self._description = str( [letter,rank] )
def irreducible_components( self ):
return tuple([self])
@cached_method
def class_size(self):
if not self.is_mutation_finite():
return infinity
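        # class_size returns the number of quivers (up to isomorphism) in the
        # mutation class of self.  Quick sanity check of the closed formula
        # below for type ['A',3] (n = 3): binomial(8,4)//5 = 14, plus
        # binomial(4,2) = 6 since n is odd, plus 2*binomial(2,1) = 4 since
        # 3 divides n, gives 24, and 24//6 = 4 quivers.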
if self._letter == 'A':
if self.is_finite():
n = self._rank
a = binomial( 2*(n+1), n+1 ) // (n+2)
if n % 2 == 1:
a += binomial( n+1, (n+1)//2 )
if n % 3 == 0:
a += 2 * binomial( 2*n//3, n//3 )
return a // (n+3)
elif self.is_affine():
i,j = self._bi_rank
i = ZZ(i)
j = ZZ(j)
n = i+j
f = Euler_Phi()
if i == j:
return ( binomial( 2*i,i ) +
sum( f(k) * binomial(2*i//k,i//k)**2
for k in [k for k in i.divisors()
if k in j.divisors()] ) // n ) // 4
else:
return sum( f(k) * binomial(2*i//k,i//k) *
binomial(2*j//k,j//k)
for k in [k for k in i.divisors()
if k in j.divisors()] ) // ( 2 * n )
elif self._letter in ['B', 'C']:
if self.is_finite():
n = self._rank
return binomial(2 * n, n) // (n + 1)
elif self._letter in ['BB','CC']:
# these two formulas are not yet proven
print(Warning("Warning: This method uses a formula "
"which has not been proved correct."))
if self.is_affine():
if self._twist == 1:
n = self._rank - 1
if n%2==1:
return binomial( 2*n-1, n-1 )
else:
return binomial( 2*n-1, n-1 ) + binomial( n-1, n//2 -1 )
# type BC (affine)
elif self._letter == 'BC':
# this formula is not yet proven
print(Warning("Warning: This method uses a formula "
"which has not been proved correct."))
if self.is_affine():
if self._twist == 1:
n = self._rank - 1
return binomial( 2*n, n )
# types BD and CD (affine)
elif self._letter in ['BD','CD']:
# this formula is not yet proven
print(Warning("Warning: This method uses a formula "
"which has not been proved correct."))
if self.is_affine():
if self._twist == 1:
n = self._rank - 2
return 2*binomial( 2*n, n )
# type D (finite and affine)
elif self._letter == 'D':
# the formula is taken from Bastian, Prellberg, Rubey, Stump
if self.is_finite():
if self._rank == 4:
return 6
else:
f = Euler_Phi()
n = ZZ(self._rank)
return sum( f( n//k ) * binomial( 2*k, k )
for k in n.divisors() ) // (2*n)
# this formula is not yet proven
elif self.is_affine():
n = self._rank - 3
if n == 2:
return 9
else:
print(Warning ("Warning: This method uses a formula "
"which has not been proved correct."))
if n%2==1:
return 2*binomial(2*n,n)
else:
return 2*binomial(2*n,n) + binomial(n, n//2)
# the exceptional types are hard-coded
# type E (finite, affine and elliptic)
elif self._letter == 'E':
if self.is_finite():
if self._rank == 6:
return 67
elif self._rank == 7:
return 416
elif self._rank == 8:
return 1574
elif self.is_affine():
if self._rank == 7:
return 132
elif self._rank == 8:
return 1080
elif self._rank == 9:
return 7560
elif self.is_elliptic():
if self._rank == 8:
return 49
elif self._rank == 9:
return 506
elif self._rank == 10:
return 5739
# type F
elif self._letter == 'F':
if self.is_finite():
return 15
elif self.is_affine():
return 60
elif self.is_elliptic():
if self._twist == [1,2]:
return 90
if self._twist == [1,1] or self._twist == [2,2]:
return 35
# type G
elif self._letter == 'G':
if self.is_finite():
return 2
elif self.is_affine():
return 6
elif self.is_elliptic():
if self._twist == [1,3]:
return 7
if self._twist == [1,1] or self._twist == [3,3]:
return 2
# type X
elif self._letter == 'X':
if self._rank == 6:
return 5
elif self._rank == 7:
return 2
        # otherwise the size is reported as unknown
else:
print("Size unknown")
return NotImplemented
def dual(self):
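        # Return the dual mutation type: the letters B and C (and the variants
        # BB/CC, BD/CD) are interchanged, simply-laced types are self-dual, and
        # the affine and elliptic forms of F and G need their twist adjusted.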
letter = self.letter()
        # types B and C (and their variants BB/CC, BD/CD) are dual to each other
if letter != 'BC' and letter[0] in ['B','C']:
if letter == 'BB': letter = 'CC'
elif letter == 'CC': letter = 'BB'
elif letter[0] == 'B': letter = 'C' + letter[1:]
elif letter[0] == 'C': letter = 'B' + letter[1:]
rank = self._rank
if self.is_affine():
rank -= 1
twist = self._twist
return QuiverMutationType(letter,rank,twist)
# the cases F and G have non-trivial duality in some cases
elif letter in ['F','G']:
if self.is_finite(): return self
elif self.is_affine():
rank = self._rank - 1
twist = - self._twist
elif self.is_elliptic():
twist = self._twist
rank = self._rank - 2
if letter == 'F':
                    if self._twist == [2,2]:
                        twist = [1,1]
                    if self._twist == [1,1]:
                        twist = [2,2]
if letter == 'G':
if self._twist == [3,3]:
twist = [1,1]
elif self._twist == [1,1]:
twist = [3,3]
else: rank = self._rank
return QuiverMutationType(letter,rank,twist)
else:
return self
class QuiverMutationType_Reducible(QuiverMutationType_abstract):
def __init__(self, *args):
data = args
if len(data) < 2 or not all( isinstance(comp, QuiverMutationType_Irreducible) for comp in data ):
return _mutation_type_error(data)
# _info is initialized
self._info = {}
self._info['irreducible'] = False
self._info['mutation_finite'] = all(comp.is_mutation_finite()
for comp in data)
self._info['simply_laced'] = all(comp.is_simply_laced()
for comp in data)
self._info['skew_symmetric'] = all(comp.is_skew_symmetric()
for comp in data)
self._info['finite'] = all(comp.is_finite() for comp in data)
self._info['irreducible_components'] = copy(data)
# letter and rank are initialized
self._letter = ''
self._rank = 0
# graph and digraph are initialized
self._graph = Graph()
self._digraph = DiGraph()
for comp in data:
if self._letter:
self._letter += ' x '
self._letter += comp._letter
self._rank += comp._rank
self._graph = self._graph.disjoint_union(comp._graph,
labels='integers')
self._digraph = self._digraph.disjoint_union(comp._digraph,
labels='integers')
self._graph.name('')
self._digraph.name('')
# _description is as for CartanType
self._description = "[ "
comps = self.irreducible_components()
for i in range(len(comps)):
if i > 0: self._description += ", "
self._description += comps[i]._description
self._description += " ]"
def irreducible_components( self ):
return self._info['irreducible_components']
@cached_method
def class_size(self):
if not self.is_mutation_finite():
return infinity
else:
components = []
multiplicities = []
for x in self.irreducible_components():
if components.count(x) == 0:
components.append(x)
multiplicities.append(1)
else:
y = components.index(x)
multiplicities[y] = multiplicities[y]+1
sizes = [ x.class_size() for x in components ]
if NotImplemented in sizes:
print("Size unknown")
return NotImplemented
else:
return prod( [binomial(sizes[i]+multiplicities[i]-1,
multiplicities[i] ) for i in range (0,len(sizes))])
def dual(self):
comps = self.irreducible_components()
return QuiverMutationType( [comp.dual() for comp in comps ] )
def _construct_classical_mutation_classes(n):
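    # Compute the mutation classes of the classical mutation-finite types of
    # rank n (finite and affine A, B, C, D and their variants) and return them
    # in a dict keyed by type, with each class encoded as dig6 strings.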
from sage.combinat.cluster_algebra_quiver.quiver import ClusterQuiver
data = {}
# finite A
data[ ('A',n) ] = ClusterQuiver(['A',n]).mutation_class(data_type='dig6')
# affine A
for j in range(1, n//2+1):
data[ ('A',(n-j,j),1) ] = ClusterQuiver(['A',[n-j,j],1]).mutation_class(data_type='dig6')
# finite B
if n > 1:
data[ ('B',n) ] = ClusterQuiver(['B',n]).mutation_class(data_type='dig6')
# affine B
if n > 2:
data[ ('BB',n-1,1) ] = ClusterQuiver(['BB',n-1,1]).mutation_class(data_type='dig6')
# finite C
if n > 2:
data[ ('C',n) ] = ClusterQuiver(['C',n]).mutation_class(data_type='dig6')
# affine C
if n > 1:
data[ ('BC',n-1,1) ] = ClusterQuiver(['BC',n-1,1]).mutation_class(data_type='dig6')
# affine CC
if n > 2:
data[ ('CC',n-1,1) ] = ClusterQuiver(['CC',n-1,1]).mutation_class(data_type='dig6')
# affine BD
if n > 3:
data[ ('BD',n-1,1) ] = ClusterQuiver(['BD',n-1,1]).mutation_class(data_type='dig6')
# affine CD
if n > 3:
data[ ('CD',n-1,1) ] = ClusterQuiver(['CD',n-1,1]).mutation_class(data_type='dig6')
# finite D
if n > 3:
data[ ('D',n) ] = ClusterQuiver(['D',n]).mutation_class(data_type='dig6')
# affine D
if n > 4:
data[ ('D',n-1,1) ] = ClusterQuiver(['D',n-1,1]).mutation_class(data_type='dig6')
return data
def _construct_exceptional_mutation_classes(n):
from sage.combinat.cluster_algebra_quiver.quiver import ClusterQuiver
data = {}
# finite E
if n in [6,7,8]:
data[ ('E',n) ] = ClusterQuiver(['E',n]).mutation_class(data_type='dig6')
# affine E
if n in [7,8,9]:
data[ ('E',n-1,1) ] = ClusterQuiver(['E',n-1,1]).mutation_class(data_type='dig6')
# elliptic E
if n in [8,9,10]:
data[ ('E',n-2,(1,1)) ] = ClusterQuiver(['E',n-2,[1,1]]).mutation_class(data_type='dig6')
# finite F
if n == 4:
data[ ('F',4) ] = ClusterQuiver(['F',4]).mutation_class(data_type='dig6')
# affine F
if n == 5:
data[ ('F',4,1) ] = ClusterQuiver(['F',4,1]).mutation_class(data_type='dig6')
data[ ('F',4,-1) ] = ClusterQuiver(['F',4,-1]).mutation_class(data_type='dig6')
# finite G
if n == 2:
data[ ('G',2) ] = ClusterQuiver(['G',2]).mutation_class(data_type='dig6')
# affine G
if n == 3:
data[ ('G',2,1) ] = ClusterQuiver(['G',2,1]).mutation_class(data_type='dig6')
data[ ('G',2,-1) ] = ClusterQuiver(['G',2,-1]).mutation_class(data_type='dig6')
# elliptic G
if n == 4:
data[ ('G',2,(1,3)) ] = ClusterQuiver(['G',2,(1,3)]).mutation_class(data_type='dig6')
data[ ('G',2,(1,1)) ] = ClusterQuiver(['G',2,(1,1)]).mutation_class(data_type='dig6')
data[ ('G',2,(3,3)) ] = ClusterQuiver(['G',2,(3,3)]).mutation_class(data_type='dig6')
# X
if n in [6,7]:
data[ ('X',n) ] = ClusterQuiver(['X',n]).mutation_class(data_type='dig6')
# elliptic F
if n == 6:
data[ ('F',4,(1,2)) ] = ClusterQuiver(['F',4,(1,2)]).mutation_class(data_type='dig6')
data[ ('F',4,(1,1)) ] = ClusterQuiver(['F',4,(1,1)]).mutation_class(data_type='dig6')
data[ ('F',4,(2,2)) ] = ClusterQuiver(['F',4,(2,2)]).mutation_class(data_type='dig6')
return data
def _save_data_dig6(n, types='ClassicalExceptional', verbose=False):
import os.path
from six.moves import cPickle
data = {}
possible_types = ['Classical', 'ClassicalExceptional', 'Exceptional']
if types not in possible_types:
raise ValueError('The third input must be either ClassicalExceptional'
' (default), Classical, or Exceptional.')
if types in possible_types[:2]:
data.update(_construct_classical_mutation_classes(n))
if types in possible_types[1:]:
data.update(_construct_exceptional_mutation_classes(n))
from sage.env import DOT_SAGE
from sage.misc.misc import sage_makedirs
types_path = os.path.join(DOT_SAGE, 'cluster_algebra_quiver')
types_file = os.path.join(types_path,'mutation_classes_%s.dig6'%n)
sage_makedirs(types_path)
from sage.misc.temporary_file import atomic_write
with atomic_write(types_file, binary=True) as f:
cPickle.dump(data, f)
if verbose:
keys = sorted(data.keys(),key=str)
print("\nThe following types are saved to file", types_file,"and will now be used to determine quiver mutation types:")
print(keys)
def save_quiver_data(n, up_to=True, types='ClassicalExceptional', verbose=True):
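    # Precompute the mutation classes of the mutation-finite types of rank n
    # (or of every rank up to n when ``up_to`` is True) and save them under
    # DOT_SAGE; load_data reads these files when determining quiver mutation
    # types, so its cache is cleared at the end.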
from sage.combinat.cluster_algebra_quiver.mutation_type import load_data
if up_to is True:
ranks = range(1,n+1)
elif up_to is False:
ranks = [n]
for i in ranks:
_save_data_dig6(i,types=types,verbose=verbose)
# we finally clear the load_data
load_data.clear_cache()
def _bipartite_graph_to_digraph(g):
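    # Orient every edge of the bipartite graph g from its first colour class
    # to its second and return the resulting digraph (isolated vertices kept).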
if not g.is_bipartite():
raise ValueError('The input graph is not bipartite.')
order = g.bipartite_sets()
dg = DiGraph()
for edge in g.edges():
if edge[0] in order[0]:
dg.add_edge( edge[0], edge[1], edge[2] )
else:
dg.add_edge( edge[1], edge[0], edge[2] )
for vert in g.vertex_iterator():
if vert not in dg.vertices():
dg.add_vertex(vert)
return dg
def _is_mutation_type(data):
try:
QuiverMutationType(data)
return True
except Exception:
return False
def _mutation_type_error(data):
if data[2] is None:
del data[2]
return_str = str(data) + ' is not a valid quiver mutation type'
return_str += '\n Finite types have the form [ \'?\', n ] for type ? and rank n'
return_str += '\n Affine type A has the form [ \'A\', [ i, j ], 1 ] for rank i+j'
return_str += '\n Affine type ? has the form [ \'?\', k, \\pm 1 ] for rank k+1'
return_str += '\n Elliptic type ? has the form [ \'?\', k, [i, j] ] (1 <= i,j <= 3) for rank k+2'
return_str += '\n For correct syntax in other types, please consult the documentation.'
raise ValueError(return_str)
def _edge_list_to_matrix(edges, nlist, mlist):
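    # Build the (n+m) x n exchange matrix encoded by the labelled edge list:
    # nlist holds the n mutable vertices, mlist the m frozen ones, and an edge
    # (v1, v2) with label (a, b) places a at [v1, v2] and b at [v2, v1]
    # (columns exist only for mutable vertices; an integer label c stands for
    # (c, -c) and None stands for (1, -1)).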
n = len(nlist)
m = len(mlist)
nmlist = nlist + mlist
M = matrix(ZZ, n + m, n, sparse=True)
for edge in edges:
if edge[2] is None:
edge = (edge[0], edge[1], (1, -1))
elif edge[2] in ZZ:
edge = (edge[0], edge[1], (edge[2], -edge[2]))
v1, v2, (a, b) = edge
if v1 in nlist:
M[nmlist.index(v2), nmlist.index(v1)] = b
if v2 in nlist:
M[nmlist.index(v1), nmlist.index(v2)] = a
return M
| true
| true
|
f7056ab25ad90481d8aa15bfd7dbc66c04b7c3ea
| 4,314
|
py
|
Python
|
backtrader/observers/benchmark.py
|
trinh-hoang-hiep/iching
|
e1feae5741c3cbde535d7a275b01d4f0cf9e21ed
|
[
"Apache-2.0"
] | 1
|
2021-04-09T06:24:08.000Z
|
2021-04-09T06:24:08.000Z
|
backtrader/observers/benchmark.py
|
trinh-hoang-hiep/iching
|
e1feae5741c3cbde535d7a275b01d4f0cf9e21ed
|
[
"Apache-2.0"
] | null | null | null |
backtrader/observers/benchmark.py
|
trinh-hoang-hiep/iching
|
e1feae5741c3cbde535d7a275b01d4f0cf9e21ed
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import backtrader as bt
from . import TimeReturn
class Benchmark(TimeReturn):
'''This observer stores the *returns* of the strategy and the *return* of a
reference asset which is one of the datas passed to the system.
Params:
- ``timeframe`` (default: ``None``)
If ``None`` then the complete return over the entire backtested period
will be reported
- ``compression`` (default: ``None``)
        Only used for sub-day timeframes, for example to work on an hourly
        timeframe by specifying ``TimeFrame.Minutes`` and ``60`` as compression
- ``data`` (default: ``None``)
Reference asset to track to allow for comparison.
.. note:: this data must have been added to a ``cerebro`` instance with
          ``adddata``, ``resampledata`` or ``replaydata``.
- ``_doprenext`` (default: ``False``)
Benchmarking will take place from the point at which the strategy kicks
in (i.e.: when the minimum period of the strategy has been met).
Setting this to ``True`` will record benchmarking values from the
starting point of the data feeds
- ``firstopen`` (default: ``False``)
        Keeping it as ``False`` ensures that the 1st comparison point between
the value and the benchmark starts at 0%, because the benchmark will
not use its opening price.
See the ``TimeReturn`` analyzer reference for a full explanation of the
meaning of the parameter
- ``fund`` (default: ``None``)
If ``None`` the actual mode of the broker (fundmode - True/False) will
be autodetected to decide if the returns are based on the total net
asset value or on the fund value. See ``set_fundmode`` in the broker
documentation
Set it to ``True`` or ``False`` for a specific behavior
Remember that at any moment of a ``run`` the current values can be checked
by looking at the *lines* by name at index ``0``.
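
    A minimal usage sketch (the data feeds below are illustrative placeholders,
    not part of this module)::

      cerebro = bt.Cerebro()
      cerebro.adddata(asset_data)        # traded asset -> data0
      cerebro.adddata(reference_data)    # asset to benchmark against
      cerebro.addobserver(bt.observers.Benchmark, data=reference_data)
      cerebro.run()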
'''
_stclock = True
lines = ('benchmark',)
plotlines = dict(benchmark=dict(_name='Benchmark'))
params = (
('data', None),
('_doprenext', False),
# Set to false to ensure the asset is measured at 0% in the 1st tick
('firstopen', False),
('fund', None)
)
def _plotlabel(self):
labels = super(Benchmark, self)._plotlabel()
labels.append(self.p.data._name)
return labels
def __init__(self):
if self.p.data is None: # use the 1st data in the system if none given
self.p.data = self.data0
super(Benchmark, self).__init__() # treturn including data parameter
# Create a time return object without the data
kwargs = self.p._getkwargs()
        kwargs.update(data=None)  # to create a return for the strategy
t = self._owner._addanalyzer_slave(bt.analyzers.TimeReturn, **kwargs)
# swap for consistency
self.treturn, self.tbench = t, self.treturn
def next(self):
super(Benchmark, self).next()
self.lines.benchmark[0] = self.tbench.rets.get(self.treturn.dtkey,
float('NaN'))
def prenext(self):
if self.p._doprenext:
super(TimeReturn, self).prenext()
| 35.95
| 79
| 0.62077
| true
| true
|
|
f7056c6430a204c2507a84847929823af3d8b505
| 8,334
|
py
|
Python
|
autotest/test_gwf_lakobs01.py
|
mkennard-aquaveo/modflow6
|
73a0553636362c90f7d134318e1f5d902dbdc4d3
|
[
"CC0-1.0"
] | null | null | null |
autotest/test_gwf_lakobs01.py
|
mkennard-aquaveo/modflow6
|
73a0553636362c90f7d134318e1f5d902dbdc4d3
|
[
"CC0-1.0"
] | null | null | null |
autotest/test_gwf_lakobs01.py
|
mkennard-aquaveo/modflow6
|
73a0553636362c90f7d134318e1f5d902dbdc4d3
|
[
"CC0-1.0"
] | null | null | null |
# Test for checking lak observation input. The following observation types:
# 'lak', 'wetted-area', and 'conductance' require that ID2 be provided when
# ID is an integer corresponding to a lake number and not BOUNDNAME.
# See table in LAK Package section of mf6io.pdf for an explanation of ID,
# ID2, and Observation Type.
import os
import pytest
import sys
import numpy as np
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
import targets
mf6_exe = os.path.abspath(targets.target_dict["mf6"])
ex = "gwf_lakobs_01a"
exdir = os.path.join("temp", ex)
# store global gwf for subsequent plotting
gwf = None
def get_idomain(nlay, nrow, ncol, lakend):
idomain = np.ones((nlay, nrow, ncol), dtype=int)
for k, j in enumerate(lakend):
idomain[k, 0, 0:j] = 0
return idomain
def build_model():
lx = 300.0
lz = 45.0
nlay = 45
nrow = 1
ncol = 30
nper = 1
delc = 1.0
delr = lx / ncol
delz = lz / nlay
top = 5.0
botm = [top - (k + 1) * delz for k in range(nlay)]
perlen = [20.0]
nstp = [1]
tsmult = [1.0]
Kh = 1.0
Kv = 1.0
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
nouter, ninner = 700, 300
hclose, rclose, relax = 1e-8, 1e-6, 0.97
name = ex
# build MODFLOW 6 files
ws = exdir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name=mf6_exe, sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# create gwf model
gwfname = name
global gwf
gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, newtonoptions="NEWTON")
imsgwf = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwfname),
)
# number of columns to be a lake for layer 1, 2, , ... len(lakend)
lakend = [10, 9, 8, 7, 6]
idomain = get_idomain(nlay, nrow, ncol, lakend)
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=idomain,
)
# initial conditions
strt = np.zeros((nlay, nrow, ncol), dtype=float)
strt += top
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf,
xt3doptions=False,
save_flows=True,
save_specific_discharge=True,
icelltype=1,
k=Kh,
k33=Kv,
)
sy = 0.3
ss = np.zeros((nlay, nrow, ncol), dtype=float)
# ss[0, :, :] = sy
idx = np.where(idomain == 0)
for k, i, j in zip(*idx):
ss[k + 1, i, j] = 0.0 # sy
sto = flopy.mf6.ModflowGwfsto(gwf, sy=sy, ss=ss, iconvert=1)
irch = np.zeros((nrow, ncol), dtype=int)
lake_vconnect = []
idx = np.where(idomain == 0)
for k, i, j in zip(*idx):
if idomain[k + 1, i, j] == 1:
lake_vconnect.append((k + 1, i, j))
irch[i, j] = k + 1
nlakeconn = len(lake_vconnect)
# pak_data = [lakeno, strt, nlakeconn]
initial_stage = 0.1
pak_data = [(0, initial_stage, nlakeconn)]
bedleak = 100.0 # "None"
belev = 0.0
con_data = [
(0, i, idx, "VERTICAL", bedleak, belev, -99, -99, -99)
for i, idx in enumerate(lake_vconnect)
]
# period data
p_data = [
(0, "STATUS", "ACTIVE"),
]
# note: for specifying lake number, use fortran indexing!
fname = "{}.lak.obs.csv".format(gwfname)
lak_obs = {
fname: [
("lakestage", "stage", 1),
("lakevolume", "volume", 1),
("lak1", "lak", 1),
],
"digits": 10,
}
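    # note: the ("lak1", "lak", 1) entry above deliberately omits ID2 (the
    # connection number); the first run of the simulation below is expected to
    # fail with "ID2 (iconn) is missing" until the obs input is corrected.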
lak = flopy.mf6.modflow.ModflowGwflak(
gwf,
surfdep=0.0,
save_flows=True,
print_input=True,
print_flows=True,
print_stage=True,
stage_filerecord="{}.lak.bin".format(gwfname),
budget_filerecord="{}.lak.bud".format(gwfname),
nlakes=len(pak_data),
ntables=0,
packagedata=pak_data,
pname="LAK-1",
connectiondata=con_data,
perioddata=p_data,
observations=lak_obs,
)
chdspd = [((0, 0, ncol - 1), 5.0)]
chd = flopy.mf6.modflow.ModflowGwfchd(gwf, stress_period_data=chdspd)
rech = 0.0001 * np.ones((nrow, ncol), dtype=float)
# rech[:, 0:20] = 0.
rch = flopy.mf6.modflow.ModflowGwfrcha(
gwf, print_flows=True, save_flows=True, recharge=rech, irch=irch
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(gwfname),
head_filerecord="{}.hds".format(gwfname),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
)
return sim
# - No need to change any code below
def test_mf6model():
# initialize testing framework
test = testing_framework()
# build the models
sim = build_model()
# write model input
sim.write_simulation()
# attempt to run model, should fail
sim.run_simulation()
# ensure that the error msg is contained in the mfsim.lst file
f = open(os.path.join(exdir, "mfsim.lst"), "r")
lines = f.readlines()
error_count = 0
expected_msg = False
for line in lines:
if "ID2 (iconn) is missing" in line:
expected_msg = True
error_count += 1
assert error_count == 1, (
"error count = " + str(error_count) + "but should equal 1"
)
# fix the error and attempt to rerun model
orig_fl = os.path.join(exdir, ex + ".lak.obs")
new_fl = os.path.join(exdir, ex + ".lak.obs.new")
sr = open(orig_fl, "r")
sw = open(new_fl, "w")
lines = sr.readlines()
error_free_line = " lak1 lak 1 1\n"
for line in lines:
if " lak " in line:
sw.write(error_free_line)
else:
sw.write(line)
sr.close()
sw.close()
    # delete original and replace with corrected lak obs input
os.remove(orig_fl)
os.rename(new_fl, orig_fl)
# rerun the model, should be no errors
sim.run_simulation()
return
def main():
# initialize testing framework
test = testing_framework()
# build the models
sim = build_model()
# write model input
sim.write_simulation()
# attempt to run model, should fail
sim.run_simulation()
# ensure that the error msg is contained in the mfsim.lst file
f = open(os.path.join(exdir, "mfsim.lst"), "r")
lines = f.readlines()
error_count = 0
expected_msg = False
for line in lines:
if "ID2 (iconn) is missing" in line:
expected_msg = True
error_count += 1
assert error_count == 1, (
"error count = " + str(error_count) + ", but should equal 1"
)
# fix the error and attempt to rerun model
orig_fl = os.path.join(exdir, ex + ".lak.obs")
new_fl = os.path.join(exdir, ex + ".lak.obs.new")
sr = open(orig_fl, "r")
sw = open(new_fl, "w")
lines = sr.readlines()
error_free_line = " lak1 lak 1 1\n"
for line in lines:
if " lak " in line:
sw.write(error_free_line)
else:
sw.write(line)
sr.close()
sw.close()
    # delete original and replace with corrected lak obs input
os.remove(orig_fl)
os.rename(new_fl, orig_fl)
# rerun the model, should be no errors
sim.run_simulation()
return
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
| 24.952096
| 79
| 0.586273
|
import os
import pytest
import sys
import numpy as np
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
import targets
mf6_exe = os.path.abspath(targets.target_dict["mf6"])
ex = "gwf_lakobs_01a"
exdir = os.path.join("temp", ex)
gwf = None
def get_idomain(nlay, nrow, ncol, lakend):
idomain = np.ones((nlay, nrow, ncol), dtype=int)
for k, j in enumerate(lakend):
idomain[k, 0, 0:j] = 0
return idomain
def build_model():
lx = 300.0
lz = 45.0
nlay = 45
nrow = 1
ncol = 30
nper = 1
delc = 1.0
delr = lx / ncol
delz = lz / nlay
top = 5.0
botm = [top - (k + 1) * delz for k in range(nlay)]
perlen = [20.0]
nstp = [1]
tsmult = [1.0]
Kh = 1.0
Kv = 1.0
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
nouter, ninner = 700, 300
hclose, rclose, relax = 1e-8, 1e-6, 0.97
name = ex
ws = exdir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name=mf6_exe, sim_ws=ws
)
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
gwfname = name
global gwf
gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, newtonoptions="NEWTON")
imsgwf = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwfname),
)
lakend = [10, 9, 8, 7, 6]
idomain = get_idomain(nlay, nrow, ncol, lakend)
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=idomain,
)
strt = np.zeros((nlay, nrow, ncol), dtype=float)
strt += top
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
npf = flopy.mf6.ModflowGwfnpf(
gwf,
xt3doptions=False,
save_flows=True,
save_specific_discharge=True,
icelltype=1,
k=Kh,
k33=Kv,
)
sy = 0.3
ss = np.zeros((nlay, nrow, ncol), dtype=float)
idx = np.where(idomain == 0)
for k, i, j in zip(*idx):
ss[k + 1, i, j] = 0.0
sto = flopy.mf6.ModflowGwfsto(gwf, sy=sy, ss=ss, iconvert=1)
irch = np.zeros((nrow, ncol), dtype=int)
lake_vconnect = []
idx = np.where(idomain == 0)
for k, i, j in zip(*idx):
if idomain[k + 1, i, j] == 1:
lake_vconnect.append((k + 1, i, j))
irch[i, j] = k + 1
nlakeconn = len(lake_vconnect)
initial_stage = 0.1
pak_data = [(0, initial_stage, nlakeconn)]
bedleak = 100.0
belev = 0.0
con_data = [
(0, i, idx, "VERTICAL", bedleak, belev, -99, -99, -99)
for i, idx in enumerate(lake_vconnect)
]
p_data = [
(0, "STATUS", "ACTIVE"),
]
fname = "{}.lak.obs.csv".format(gwfname)
lak_obs = {
fname: [
("lakestage", "stage", 1),
("lakevolume", "volume", 1),
("lak1", "lak", 1),
],
"digits": 10,
}
lak = flopy.mf6.modflow.ModflowGwflak(
gwf,
surfdep=0.0,
save_flows=True,
print_input=True,
print_flows=True,
print_stage=True,
stage_filerecord="{}.lak.bin".format(gwfname),
budget_filerecord="{}.lak.bud".format(gwfname),
nlakes=len(pak_data),
ntables=0,
packagedata=pak_data,
pname="LAK-1",
connectiondata=con_data,
perioddata=p_data,
observations=lak_obs,
)
chdspd = [((0, 0, ncol - 1), 5.0)]
chd = flopy.mf6.modflow.ModflowGwfchd(gwf, stress_period_data=chdspd)
rech = 0.0001 * np.ones((nrow, ncol), dtype=float)
rch = flopy.mf6.modflow.ModflowGwfrcha(
gwf, print_flows=True, save_flows=True, recharge=rech, irch=irch
)
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(gwfname),
head_filerecord="{}.hds".format(gwfname),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
)
return sim
def test_mf6model():
test = testing_framework()
sim = build_model()
sim.write_simulation()
sim.run_simulation()
f = open(os.path.join(exdir, "mfsim.lst"), "r")
lines = f.readlines()
error_count = 0
expected_msg = False
for line in lines:
if "ID2 (iconn) is missing" in line:
expected_msg = True
error_count += 1
assert error_count == 1, (
"error count = " + str(error_count) + "but should equal 1"
)
orig_fl = os.path.join(exdir, ex + ".lak.obs")
new_fl = os.path.join(exdir, ex + ".lak.obs.new")
sr = open(orig_fl, "r")
sw = open(new_fl, "w")
lines = sr.readlines()
error_free_line = " lak1 lak 1 1\n"
for line in lines:
if " lak " in line:
sw.write(error_free_line)
else:
sw.write(line)
sr.close()
sw.close()
os.remove(orig_fl)
os.rename(new_fl, orig_fl)
sim.run_simulation()
return
def main():
test = testing_framework()
sim = build_model()
sim.write_simulation()
sim.run_simulation()
f = open(os.path.join(exdir, "mfsim.lst"), "r")
lines = f.readlines()
error_count = 0
expected_msg = False
for line in lines:
if "ID2 (iconn) is missing" in line:
expected_msg = True
error_count += 1
assert error_count == 1, (
"error count = " + str(error_count) + ", but should equal 1"
)
orig_fl = os.path.join(exdir, ex + ".lak.obs")
new_fl = os.path.join(exdir, ex + ".lak.obs.new")
sr = open(orig_fl, "r")
sw = open(new_fl, "w")
lines = sr.readlines()
error_free_line = " lak1 lak 1 1\n"
for line in lines:
if " lak " in line:
sw.write(error_free_line)
else:
sw.write(line)
sr.close()
sw.close()
os.remove(orig_fl)
os.rename(new_fl, orig_fl)
sim.run_simulation()
return
if __name__ == "__main__":
print("standalone run of {}".format(os.path.basename(__file__)))
main()
| true
| true
|
f7056da8b11d4e248ba6d5172376ce2589dd69a5
| 1,745
|
py
|
Python
|
src/explore.py
|
argsim/argsim
|
e5407acf7e47f2bf517b0c580fcdee3654d31089
|
[
"MIT"
] | null | null | null |
src/explore.py
|
argsim/argsim
|
e5407acf7e47f2bf517b0c580fcdee3654d31089
|
[
"MIT"
] | 2
|
2019-01-09T21:35:39.000Z
|
2019-03-11T18:12:21.000Z
|
src/explore.py
|
argsim/argsim
|
e5407acf7e47f2bf517b0c580fcdee3654d31089
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from model import vAe, decode
import util_sp as sp
from util_io import load_txt
import numpy as np
def analyze(z, use_dim=[], seed=25):
    '''z: np.array of shape [2, dim], the mu vectors of two sentences.
    use_dim: list of ints describing which dimensions should be used.'''
# select random path from z1 to z2
np.random.seed(seed)
if use_dim == []:
rdm_path = np.arange(len(z[0]))
else:
rdm_path = use_dim
np.random.shuffle(rdm_path)
# walk the path and print at every step
path = np.copy(z[0])
for idx,dim in enumerate(rdm_path):
path[dim] = z[1][dim]
output = decode(sess, vae, [z[0], path, z[1]]).tolist()
_ = [vocab.decode_ids(output[idx]) for idx in range(3)]
print(idx,dim, _[1])
#print("{}\n{}\n{}\n{}\n".format(idx,_[0],_[1],_[2])) #print: sentence1, path, sentence2
path_vocab = "../trial/data/vocab.model"
path_txt = "../data/test_data.txt"
path_ckpt = "../trial/ckpt/kudo18"
path_use_dim = "../data/useful_dimension.npy"
# load and restore model
vae = vAe('infer')
sess = tf.InteractiveSession()
tf.train.Saver().restore(sess, path_ckpt)
# load vocab and text
vocab = sp.load_spm(path_vocab)
text = list(load_txt(path_txt))
#pick 2 random sentences to explore
np.random.seed(23)
sen_idx = np.random.random_integers(0, len(text), 2)
sentences = [text[idx] for idx in sen_idx]
print("sentence 1: {}\nsentence 2: {}".format(sentences[0], sentences[1]))
# encode sentences with sentence piece model
data = sp.encode(vocab, sentences)
### full high dimensional space
z = vae.z.eval({vae.tgt: data})
analyze(z)
### only the dimensions that turned out useful for our task
use_dim = np.load(path_use_dim)
analyze(z, use_dim)
| 29.083333
| 96
| 0.667049
|
import tensorflow as tf
from model import vAe, decode
import util_sp as sp
from util_io import load_txt
import numpy as np
def analyze(z, use_dim=[], seed=25):
np.random.seed(seed)
if use_dim == []:
rdm_path = np.arange(len(z[0]))
else:
rdm_path = use_dim
np.random.shuffle(rdm_path)
path = np.copy(z[0])
for idx,dim in enumerate(rdm_path):
path[dim] = z[1][dim]
output = decode(sess, vae, [z[0], path, z[1]]).tolist()
_ = [vocab.decode_ids(output[idx]) for idx in range(3)]
print(idx,dim, _[1])
path_vocab = "../trial/data/vocab.model"
path_txt = "../data/test_data.txt"
path_ckpt = "../trial/ckpt/kudo18"
path_use_dim = "../data/useful_dimension.npy"
vae = vAe('infer')
sess = tf.InteractiveSession()
tf.train.Saver().restore(sess, path_ckpt)
vocab = sp.load_spm(path_vocab)
text = list(load_txt(path_txt))
np.random.seed(23)
sen_idx = np.random.random_integers(0, len(text), 2)
sentences = [text[idx] for idx in sen_idx]
print("sentence 1: {}\nsentence 2: {}".format(sentences[0], sentences[1]))
data = sp.encode(vocab, sentences)
| true
| true
|
f7056ed1da5c17a757cb1e9eff0dcc005ac50fbb
| 40,250
|
py
|
Python
|
venv/Lib/site-packages/caffe2/python/onnx/backend.py
|
countBMB/BenjiRepo
|
79d882263baaf2a11654ca67d2e5593074d36dfa
|
[
"Apache-2.0"
] | 1
|
2020-02-24T06:23:07.000Z
|
2020-02-24T06:23:07.000Z
|
venv/Lib/site-packages/caffe2/python/onnx/backend.py
|
countBMB/BenjiRepo
|
79d882263baaf2a11654ca67d2e5593074d36dfa
|
[
"Apache-2.0"
] | 4
|
2021-06-02T00:49:27.000Z
|
2022-01-13T01:59:34.000Z
|
venv/Lib/site-packages/caffe2/python/onnx/backend.py
|
countBMB/BenjiRepo
|
79d882263baaf2a11654ca67d2e5593074d36dfa
|
[
"Apache-2.0"
] | null | null | null |
## @package onnx
# Module caffe2.python.onnx.backend
"""Backend for running ONNX on Caffe2
To run this, you will need to have Caffe2 installed as well.
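
A minimal usage sketch of the standard ONNX backend interface (the model path
and the input shape are placeholders)::

    import onnx
    import numpy as np
    import caffe2.python.onnx.backend as backend

    model = onnx.load("model.onnx")             # placeholder path
    rep = backend.prepare(model, device="CPU")  # returns a Caffe2Rep
    outputs = rep.run(np.random.randn(1, 3, 224, 224).astype(np.float32))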
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import collections
from subprocess import Popen, PIPE
import sys
import zipfile
import itertools
# When onnx is built against a version of protobuf that is older than
# that which is vendored with caffe2, onnx will crash if caffe2's
# vendored protobuf is loaded first. We can work around this by
# importing onnx first, which will cause it to go out and pick up the
# system protobuf.
import onnx.backend
import caffe2
from caffe2.python import core, workspace, rnn_cell, gru_cell
from caffe2.python.compatibility import container_abcs
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import caffe2.python.utils
import numpy as np
import onnx
from onnx import checker, GraphProto, TensorProto, AttributeProto, ModelProto
import onnx.numpy_helper
import onnx.defs
import onnx.optimizer
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict
from caffe2.python.onnx.workspace import Workspace
from caffe2.python.onnx.backend_rep import Caffe2Rep
from caffe2.python.onnx.backend_cpp_rep import Caffe2CppRep
import caffe2.python._import_c_extension as C
import warnings
def force_unicode(s):
try:
return s.decode('utf-8')
except AttributeError:
return s
def get_device_option(device):
m = {DeviceType.CPU: caffe2_pb2.CPU,
DeviceType.CUDA: workspace.GpuDeviceType}
return core.DeviceOption(m[device.type], device.device_id)
class OnnxAttributes(dict):
"""
    A more convenient way to work with ONNX/Caffe2 attributes
    than the raw protobuf representation.
"""
@staticmethod
def from_onnx(args):
d = OnnxAttributes()
for arg in args:
d[arg.name] = convertAttributeProto(arg)
return d
def caffe2(self, kmap=lambda k: k):
for k, v in self.items():
if kmap(k) != '':
yield caffe2.python.utils.MakeArgument(kmap(k), v)
# TODO: Move this into ONNX main library
def convertAttributeProto(onnx_arg):
"""
Convert an ONNX AttributeProto into an appropriate Python object
for the type.
NB: Tensor attribute gets returned as the straight proto.
"""
if onnx_arg.HasField('f'):
return onnx_arg.f
elif onnx_arg.HasField('i'):
return onnx_arg.i
elif onnx_arg.HasField('s'):
return onnx_arg.s
elif onnx_arg.HasField('t'):
return onnx_arg.t # this is a proto!
elif onnx_arg.HasField('g'):
return Caffe2Backend._graph_to_net(onnx_arg.g, Caffe2Backend._known_opset_version)
elif len(onnx_arg.floats):
return list(onnx_arg.floats)
elif len(onnx_arg.ints):
return list(onnx_arg.ints)
elif len(onnx_arg.strings):
return list(onnx_arg.strings)
elif len(onnx_arg.graphs):
retval = []
# TODO: this doesn't work with RNN ops
for g in onnx_arg.graphs:
retval.append(Caffe2Backend._graph_to_net(g, Caffe2Backend._known_opset_version))
return retval
else:
raise ValueError("Unsupported ONNX attribute: {}".format(onnx_arg))
# TODO: Move this into ONNX main library
class OnnxNode(object):
"""
Reimplementation of NodeProto from ONNX, but in a form
more convenient to work with from Python.
We may temporarily edit these nodes to get them into Caffe2 form,
before actually translating into the Caffe2 protobuf, since this
is easier than decomposing everything, and putting it back together
when we're ready.
"""
def __init__(self, node):
self.name = str(node.name)
self.op_type = str(node.op_type)
self.attrs = OnnxAttributes.from_onnx(node.attribute)
self.inputs = list(node.input)
self.outputs = list(node.output)
Caffe2Ops = collections.namedtuple('Caffe2Ops', ['ops', 'init_ops', 'interface_blobs'])
class Caffe2Backend(Backend):
# The greatest version of the ONNX operator set which we are aware of.
# Models whose version is larger than this will cause us to emit a warning
# that we are attempting to translate on a "best effort" basis.
#
# If you increase this, make SURE you cross-reference all BC-breaking
# changes from one version to the next, and any that you did not
# implement, mark as broken in _broken_operators
_known_opset_version = 9
# This dictionary will record operators which are KNOWN to be
# broken, so we give a good error message rather than do something
# bogus and then fail.
_broken_operators = {
# 'BrokenOp': version_it_was_broken_in
}
# Operators that are different between Caffe2 and
# ONNX but only in their name.
# In most cases, this should be empty - as the effort of ONNX is
# to unify the operator definitions.
_renamed_operators = {
'GlobalMaxPool': 'MaxPool',
'GlobalAveragePool': 'AveragePool',
'Pad': 'PadImage',
'Neg': 'Negative',
'BatchNormalization': 'SpatialBN',
'InstanceNormalization': 'InstanceNorm',
'MatMul': 'BatchMatMul',
'Upsample': 'ResizeNearest',
'Identity': 'Copy',
'Equal': 'EQ',
'Less': 'LT',
'Greater': 'GT',
'Unsqueeze': 'ExpandDims',
'Loop': 'ONNXWhile',
'Tile': 'NumpyTile',
'RandomNormal': 'GaussianFill',
'RandomUniform': 'UniformFill',
}
_global_renamed_attrs = {'kernel_shape': 'kernels'}
_per_op_renamed_attrs = {
'Squeeze': {'axes': 'dims'},
'Unsqueeze': {'axes': 'dims'},
'Transpose': {'perm': 'axes'},
'Upsample': {'mode': '',
'scales': ''},
'ConvTranspose': {'output_padding': 'adjs'},
'Selu': {'gamma': 'scale'},
'If': {'then_branch': 'then_net',
'else_branch': 'else_net'},
'RandomUniform': {'low': 'min',
'high': 'max'}
}
# operators whose behavior is different beyond renaming
# the value is an attribute of this class that is a
# function from ToffeIR node_def to caffe2 op_def
_special_operators = {
'LSTM': '_create_rnn_variant',
'GRU': '_create_rnn_variant',
'RNN': '_create_rnn_variant',
'Loop': '_create_loop',
'If': '_create_if',
'Upsample': '_create_upsample',
'RandomNormal': '_create_gaussian_fill'
}
# Dummy name generator
_dummy_name = C.DummyName()
@classmethod
def dummy_name(cls):
return cls._dummy_name.new_dummy_name()
# NB: By default, you will use the LATEST definition of the operator,
# so this interface MAY make BC-breaking changes. Specify an
    # opset_version if you want to pin the operator semantics.
@classmethod
def run_node(cls, node, inputs, device='CPU', opset_version=_known_opset_version, outputs_info=None):
super(Caffe2Backend, cls).run_node(node, inputs, device=device,
outputs_info=outputs_info, opset_version=opset_version)
value_infos = []
device_option = get_device_option(Device(device))
ws = Workspace()
with core.DeviceScope(device_option): # temporary!
if isinstance(inputs, dict):
for key, value in inputs.items():
ws.FeedBlob(key, value)
value_infos.append(onnx.helper.make_tensor_value_info(
name=key,
elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
shape=value.shape).SerializeToString())
else:
assert len(node.input) == len(inputs), "{}: expected {} but got {}".format(
node.op_type, len(node.input), len(inputs))
for key, value in zip(node.input, inputs):
ws.FeedBlob(key, value)
value_infos.append(onnx.helper.make_tensor_value_info(
name=key,
elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
shape=value.shape).SerializeToString())
ops = []
cbackend = C.Caffe2Backend(cls._dummy_name)
ops_str = cbackend.convert_node(node.SerializeToString(), value_infos, opset_version)
for s in ops_str[0] + ops_str[1]:
op = caffe2_pb2.OperatorDef()
op.ParseFromString(s)
op.device_option.CopyFrom(device_option)
ops.append(op)
ws.RunOperatorsOnce(ops)
output_values = [ws.FetchBlob(name) for name in node.output]
return namedtupledict('Outputs', node.output)(*output_values)
@classmethod
def _create_tensor_filling_op(cls, onnx_tensor, name=None):
"""
        Given an ONNX TensorProto, translate it into a Caffe2 tensor-filling
        operator which produces the given tensor.
"""
assert name or onnx_tensor.name
name = name or onnx_tensor.name
c2_op = caffe2_pb2.OperatorDef()
c2_values = c2_op.arg.add()
c2_values.name = "values"
def tensor2list(onnx_tensor):
# Use the onnx.numpy_helper because the data may be raw
return onnx.numpy_helper.to_array(onnx_tensor).flatten().tolist()
if onnx_tensor.data_type in [TensorProto.FLOAT]:
c2_op.type = 'GivenTensorFill'
c2_values.floats.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type in [TensorProto.DOUBLE]:
c2_op.type = 'GivenTensorDoubleFill'
c2_values.floats.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type in [TensorProto.INT64,
TensorProto.UINT32]:
c2_op.type = 'GivenTensorInt64Fill'
c2_values.ints.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type in [TensorProto.UINT8,
TensorProto.INT8,
TensorProto.UINT16,
TensorProto.INT16,
TensorProto.INT32]:
c2_op.type = 'GivenTensorIntFill'
c2_values.ints.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type == TensorProto.BOOL:
c2_op.type = 'GivenTensorBoolFill'
c2_values.ints.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type == TensorProto.STRING:
c2_op.type = 'GivenTensorStringFill'
c2_values.strings.extend(onnx_tensor.string_data)
else:
raise RuntimeError(
"unrecognized tensor type {}".format(onnx_tensor.data_type))
c2_shape = c2_op.arg.add()
c2_shape.name = "shape"
c2_shape.ints.extend(onnx_tensor.dims)
c2_op.output.append(name)
return c2_op
@classmethod
def _rnn_reform_weights(cls, reforms, name, hidden_size, init_net, gates, reorder_indices):
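        # Slice the per-gate blocks out of the concatenated ONNX weight/bias
        # blobs and, where requested, re-concatenate them in the gate order
        # expected by the Caffe2 cell implementations.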
for name_from, name_to, do_concat, extra_dims in reforms:
gate_blobs = ['%s/%s_%s' % (name, prefix, name_to) for prefix in gates]
for i, x in enumerate(gate_blobs):
dim0 = i * hidden_size, (i+1) * hidden_size
starts, ends = zip(dim0, *extra_dims)
init_net.Slice(name_from, x, starts=starts, ends=ends)
if do_concat:
reordered_gate_blobs = [gate_blobs[i] for i in reorder_indices]
init_net.Concat(reordered_gate_blobs, ['%s/%s' % (name, name_to), cls.dummy_name()], axis=0)
@classmethod
def _make_rnn_direction(cls, input_blob, B, W, R, initial_states_and_names, sequence_lens,
pred_mh, init_net,
input_size, hidden_size, num_gates, direction_offset,
Bi, Br, W_, R_,
reform, make_cell, keep_outputs):
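        # Build one direction of the recurrent layer: slice the
        # direction-specific weights, biases and initial states out of the
        # stacked ONNX blobs, reform them into Caffe2 layout, reverse the
        # input sequence for the backward direction, run the cell, and
        # un-reverse the outputs.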
name = cls.dummy_name()
# input and recurrence biases are squashed together in onnx
# but not in caffe2
gates_hidden_size = num_gates * hidden_size
bias_offset = 2 * direction_offset * gates_hidden_size
weight_offset = direction_offset * gates_hidden_size
Bi = init_net.Slice(B, name + Bi,
starts=[bias_offset + 0 * gates_hidden_size],
ends =[bias_offset + 1 * gates_hidden_size])
Br = init_net.Slice(B, name + Br,
starts=[bias_offset + 1 * gates_hidden_size],
ends =[bias_offset + 2 * gates_hidden_size])
W_ = init_net.Slice(W, name + W_,
starts=[weight_offset + 0 * gates_hidden_size, 0],
ends =[weight_offset + 1 * gates_hidden_size,-1])
R_ = init_net.Slice(R, name + R_,
starts=[weight_offset + 0 * gates_hidden_size, 0],
ends =[weight_offset + 1 * gates_hidden_size,-1])
initial_states_sliced = []
for initial_state, name_suffix in initial_states_and_names:
initial_states_sliced.append(
pred_mh.net.Slice(initial_state, name + name_suffix,
starts=[direction_offset + 0, 0, 0],
ends =[direction_offset + 1,-1,-1]))
if direction_offset == 1:
if sequence_lens is not None:
seq_lens_for_reverse = sequence_lens
else:
input_shape = pred_mh.net.Shape(input_blob, name + '/input_shape')
batch_size = pred_mh.net.Slice(input_shape, name + '/batch_size_slice', starts=[1], ends=[2])
seq_len = pred_mh.net.Slice(input_shape, name + '/seq_len_slice', starts=[0], ends=[1])
dummy_sequence_lens = pred_mh.net.Tile([seq_len, batch_size], name + '/dummy_sequence_lens', axis=0)
pred_mh.net.Reshape(dummy_sequence_lens, [dummy_sequence_lens, cls.dummy_name()], shape=[-1])
seq_lens_for_reverse = pred_mh.net.Cast(dummy_sequence_lens, name + '/seq_lens_for_reverse', to=core.DataType.INT32)
reform(Bi, Br, W_, R_, name, hidden_size, init_net)
if direction_offset == 1:
input = pred_mh.net.ReversePackedSegs(
[input_blob, seq_lens_for_reverse], name + "/input-reversed")
else:
input = input_blob
outputs = keep_outputs(list(make_cell(
pred_mh,
input,
sequence_lens,
initial_states_sliced,
input_size,
hidden_size,
name,
drop_states=False,
forward_only=True,
)))
if direction_offset == 1:
outputs[0] = pred_mh.net.ReversePackedSegs(
[outputs[0], seq_lens_for_reverse], name + "/output-reversed")
return outputs
@classmethod
def _create_rnn_variant(cls, init_model, pred_model, n, opset_version):
assert init_model is not None, "cannot convert RNNs without access to the full model"
assert pred_model is not None, "cannot convert RNNs without access to the full model"
attrs = dict(n.attrs) # make a copy, which is safe to mutate
hidden_size = attrs.pop('hidden_size')
direction = force_unicode(attrs.pop('direction', 'forward'))
if n.op_type == 'RNN':
activation = force_unicode(attrs.pop('activations', ('tanh',))[0].lower())
elif n.op_type == 'GRU':
linear_before_reset = attrs.pop('linear_before_reset', 0)
assert not attrs, "unsupported RNN attributes: " + str(attrs.keys())
assert direction in ['forward', 'bidirectional'], "unsupported backwards RNN/GRU/LSTM"
if n.op_type in ['RNN', 'GRU']:
input_blob, W, R, B, sequence_lens, initial_h = n.inputs
elif n.op_type == 'LSTM':
input_blob, W, R, B, sequence_lens, initial_h, initial_c = n.inputs
if sequence_lens == "":
sequence_lens = None
for x in itertools.chain(init_model.graph.input,
init_model.graph.value_info,
pred_model.graph.input,
pred_model.graph.value_info):
if x.name == W:
input_size = x.type.tensor_type.shape.dim[2].dim_value
break
else:
raise RuntimeError("best-effort shape inference for RNN/GRU/LSTM failed")
pred_mh = ModelHelper()
init_net = core.Net("init-net")
init_net.Reshape(W, [W, cls.dummy_name()], shape=[1,-1,0])
init_net.Squeeze(W, W, dims=[0])
init_net.Reshape(R, [R, cls.dummy_name()], shape=[1,-1,0])
init_net.Squeeze(R, R, dims=[0])
init_net.Reshape(B, [B, cls.dummy_name()], shape=[1,-1])
init_net.Squeeze(B, B, dims=[0])
if n.op_type == 'RNN':
def reform(*args):
pass
def make_cell(*args, **kwargs):
return rnn_cell.BasicRNN(*args, activation=activation, **kwargs)
def make_rnn(direction_offset):
return cls._make_rnn_direction(
input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,
pred_mh, init_net, input_size, hidden_size, 1, direction_offset,
"/i2h_b", "/gates_t_b", "/i2h_w", "/gates_t_w",
reform, make_cell, lambda x: x)
elif n.op_type == 'GRU':
def reform(Bi, Br, W_, R_, name, hidden_size, init_net):
# caffe2 has a different order from onnx. We need to rearrange
# z r h -> r z h
reforms = ((W_, 'i2h_w', True, [(0,-1)]),
(R_, 'gate_t_w', False, [(0,-1)]),
(Bi, 'i2h_b', True, []),
(Br, 'gate_t_b', False, []))
cls._rnn_reform_weights(reforms, name, hidden_size, init_net,
['update', 'reset', 'output'], [1, 0, 2])
def make_cell(*args, **kwargs):
return gru_cell.GRU(*args, linear_before_reset=linear_before_reset, **kwargs)
def make_rnn(direction_offset):
return cls._make_rnn_direction(
input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,
pred_mh, init_net, input_size, hidden_size, 3, direction_offset,
"_bias_i2h", "_bias_gates", "/i2h_w_pre", "/gates_t_w_pre",
reform, make_cell, lambda x: x)
elif n.op_type == 'LSTM':
def reform(Bi, Br, W_, R_, name, hidden_size, init_net):
# caffe2 has a different order from onnx. We need to rearrange
# i o f c -> i f o c
reforms = ((W_, 'i2h_w', True, [(0, -1)]),
(R_, 'gates_t_w', True, [(0, -1)]),
(Bi, 'i2h_b' , True, []),
(Br, 'gates_t_b', True, []))
cls._rnn_reform_weights(reforms, name, hidden_size, init_net,
['input', 'output', 'forget', 'cell'], [0, 2, 1, 3])
def make_cell(*args, **kwargs):
return rnn_cell.LSTM(*args, **kwargs)
def make_rnn(direction_offset):
return cls._make_rnn_direction(
input_blob, B, W, R, [(initial_h, '/initial_h'), (initial_c, '/initial_c')], sequence_lens,
pred_mh, init_net, input_size, hidden_size, 4, direction_offset,
"/i2h_b", "/gates_t_b", "/i2h_w", "/gates_t_w",
reform, make_cell, lambda x: [x[0], x[1], x[3]])
if direction == 'forward':
outputs = make_rnn(0)
# in the forward case, storage is shared between the
# last outputs. We need to decouple them so that the
# VariableLengthSequencePadding only mutates
# n.outputs[0]
for i in range(1, len(outputs)):
pred_mh.net.Copy(outputs[i], n.outputs[i])
if sequence_lens is not None:
pred_mh.net.VariableLengthSequencePadding(
[outputs[0], sequence_lens], [outputs[0]])
pred_mh.net.ExpandDims([outputs[0]], [n.outputs[0]], dims=[1])
elif direction == 'bidirectional':
outputs_f = make_rnn(0)
outputs_b = make_rnn(1)
concatted_output, _ = pred_mh.net.Concat(
[outputs_f[0], outputs_b[0]], [cls.dummy_name(), cls.dummy_name()], axis=2)
if sequence_lens is not None:
pred_mh.net.VariableLengthSequencePadding(
[concatted_output, sequence_lens], [concatted_output])
reshaped_output, _ = pred_mh.net.Reshape(concatted_output, [cls.dummy_name(), cls.dummy_name()], shape=[0,0,-1,2])
pred_mh.net.Transpose(reshaped_output, n.outputs[0], axes=[0,2,1,3])
for i in range(1, len(n.outputs)):
pred_mh.net.Concat([outputs_f[i], outputs_b[i]],
[n.outputs[i], cls.dummy_name()], axis=0)
# We want to decide whether to put all of our weight-reshaping
# operators in the init net or the predict net. We can put
# them in the init net iff the inputs to those operators are
# already available, either as graph initializers, or as the
# output of other operators in the init net. The latter case
# occurs, for example, when exporting from pytorch to onnx.
# In most production use, we expect has_initializers to be
# true.
initializers = {i.name for i in init_model.graph.initializer}
outputs = {output for node in init_model.graph.node for output in node.output}
has_initializers = all(x in initializers or x in outputs for x in (W, R, B))
pred_ops = []
init_ops = []
(init_ops if has_initializers else pred_ops).extend(init_net.Proto().op)
pred_ops.extend(pred_mh.Proto().op)
return Caffe2Ops(pred_ops, init_ops, list(pred_mh.Proto().external_input))
@classmethod
def _create_control_op(cls, init_model, pred_model, n, opset_version):
control_inputs = []
if '__control_inputs' in n.attrs:
control_inputs.extend(n.attrs['__control_inputs'])
node = cls._common_onnx_node_to_caffe2_op(init_model, pred_model, n, opset_version)
node.control_input.extend(control_inputs)
return Caffe2Ops([node], [], [])
@classmethod
def _remove_ssa(cls, net, remap_dict):
for op in net.op:
for i, name in enumerate(op.output):
if name in remap_dict:
op.output[i] = remap_dict[name]
for i, out in enumerate(net.external_output):
if out in remap_dict:
net.external_output[i] = remap_dict[out]
@classmethod
def _create_if(cls, init_model, pred_model, n, opset_version):
ops = cls._create_control_op(init_model, pred_model, n, opset_version)
assert ops[0][0].type == 'If'
if_op = ops[0][0]
then_net = else_net = None
control_inputs = []
for arg in if_op.arg:
if arg.name == 'then_net':
then_net = arg.n
if arg.name == 'else_net':
else_net = arg.n
if arg.name == '__control_inputs':
control_inputs = arg.strings
assert then_net and else_net
then_net_outs = then_net.external_output
else_net_outs = else_net.external_output
op_outputs = if_op.output
assert len(then_net_outs) == len(else_net_outs)
assert len(else_net_outs) == len(op_outputs)
for arg in if_op.arg:
if arg.name == 'then_net':
arg.n.external_input.extend(control_inputs)
if arg.name == 'else_net':
arg.n.external_input.extend(control_inputs)
return ops
@classmethod
def _create_loop(cls, init_model, pred_model, n, opset_version):
ops = cls._create_control_op(init_model, pred_model, n, opset_version)
assert ops[0][0].type == 'ONNXWhile'
while_op = ops[0][0]
while_op.arg.extend([caffe2.python.utils.MakeArgument('has_trip_count', True)])
while_op.arg.extend([caffe2.python.utils.MakeArgument('has_cond', True)])
while_op.arg.extend([caffe2.python.utils.MakeArgument('disable_scopes', True)])
control_inputs = []
for arg in while_op.arg:
if arg.name == '__control_inputs':
control_inputs = arg.strings
num_loop_carried_deps = 0
for arg in while_op.arg:
if arg.name == 'body':
num_loop_carried_deps = len(arg.n.external_input) - 2
arg.n.external_input.extend(control_inputs)
while_op.arg.extend([
caffe2.python.utils.MakeArgument('num_loop_carried_deps',
num_loop_carried_deps)
])
return ops
@classmethod
def _substitute_raw_value(cls, tp, raw_values_dict):
if tp.HasField('raw_data') and tp.raw_data == bytes(b'__EXTERNAL'):
if tp.name not in raw_values_dict:
raise RuntimeError('TensorProto for value {} referenced raw data but it was not found!'.format(tp.name))
else:
tp.raw_data = raw_values_dict[tp.name]
@classmethod
def _visit_and_substitute_raw_values(cls, nodes, raw_values_dict):
for node in nodes:
for attr in node.attribute:
if attr.HasField('t'):
cls._substitute_raw_value(attr.t, raw_values_dict)
for t in attr.tensors:
cls._substitute_raw_value(t, raw_values_dict)
if attr.HasField('g'):
cls._visit_and_substitute_raw_values(attr.g.node, raw_values_dict)
for g in attr.graphs:
cls._visit_and_substitute_raw_values(g.node, raw_values_dict)
@classmethod
def _external_value_resolution_pass(cls, model, raw_values_dict):
for init in model.graph.initializer:
cls._substitute_raw_value(init, raw_values_dict)
cls._visit_and_substitute_raw_values(model.graph.node, raw_values_dict)
@classmethod
def _direct_initialize_parameters(cls, initializer, ws, device_option):
for tp in initializer:
ws.FeedBlob(tp.name, onnx.numpy_helper.to_array(tp), device_option)
@classmethod
def _direct_initialize_inputs(cls, inputs, initialized, ws, device_option):
for value_info in inputs:
if value_info.name in initialized:
continue
shape = list(d.dim_value for d in value_info.type.tensor_type.shape.dim)
ws.FeedBlob(
value_info.name,
np.ones(shape, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[value_info.type.tensor_type.elem_type]),
device_option)
@staticmethod
def optimize_onnx(input, init=False, predict=False):
passes = ['fuse_consecutive_transposes',
'eliminate_nop_transpose',
'fuse_transpose_into_gemm',
'lift_lexical_references']
if init:
passes.append('split_init')
if predict:
passes.append('split_predict')
out = onnx.optimizer.optimize(input, passes)
return out
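    # A minimal usage sketch of the pass selection above (not part of the original
    # source; `model` is assumed to be a loaded onnx.ModelProto):
    #
    #   init_half = Caffe2Backend.optimize_onnx(model, init=True)      # adds 'split_init'
    #   pred_half = Caffe2Backend.optimize_onnx(model, predict=True)   # adds 'split_predict'
    #
    # This mirrors how _onnx_model_to_caffe2_net below splits the model into an
    # initialization part and a prediction part.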
@classmethod
def prepare_zip_archive(cls, file, device='CPU', **kwargs):
with zipfile.ZipFile(file, mode='r') as z:
with z.open('__MODEL_PROTO', 'r') as f:
                model = onnx.load(f)
            blob_names = set(z.namelist()) - {'__MODEL_PROTO'}
# TODO: make this more efficient
raw_values_dict = {}
for name in blob_names:
with z.open(name, 'r') as blob_file:
raw_values_dict[name] = blob_file.read()
return cls.prepare(model, device, raw_values_dict=raw_values_dict, **kwargs)
@classmethod
def prepare(cls, model, device='CPU', raw_values_dict=None, **kwargs):
'''
        For the ONNX Caffe2Backend, we require that init_graph doesn't initialize the actual input of the predict_graph,
        for example, if "img" is the input blob for the predict_net, we require that in init_graph and in the
        initializer of the predict_graph, "img" is not initialized. We don't have a check for this, since
        there is no way we can know which blob is the input of the predict_graph.
'''
if not kwargs.pop('no_check_UNSAFE', False):
super(Caffe2Backend, cls).prepare(model, device, **kwargs)
opset_version = None
for imp in model.opset_import:
if not imp.HasField("domain") or imp.domain == "":
opset_version = imp.version
if imp.version > cls._known_opset_version:
warnings.warn("This version of onnx-caffe2 targets ONNX operator set version {}, but the model we are trying to import uses version {}. We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail.".format(cls._known_opset_version, imp.version))
else:
warnings.warn("Unrecognized operator set {}".format(imp.domain))
if opset_version is None:
if model.ir_version >= 0x00000003:
raise RuntimeError("Model with IR version >= 3 did not specify ONNX operator set version (onnx-caffe2 requires it)")
else:
opset_version = 1
model = onnx.shape_inference.infer_shapes(model)
ws = Workspace()
device_option = get_device_option(Device(device))
init_net, predict_net = cls._onnx_model_to_caffe2_net(model, device, opset_version, False)
if raw_values_dict:
cls._external_value_resolution_pass(model, raw_values_dict)
# Directly load initializer data into blobs in workspace
cls._direct_initialize_parameters(
model.graph.initializer,
ws,
device_option,
)
initialized = {init.name for init in model.graph.initializer}
cls._direct_initialize_inputs(
model.graph.input,
initialized,
ws,
device_option,
)
uninitialized = [value_info.name for value_info in model.graph.input if value_info.name not in initialized]
retval = Caffe2Rep(init_net, predict_net, ws, uninitialized)
return retval
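    # Illustrative end-to-end use of prepare() (not part of the original source).
    # The model path and input shape are placeholders; substitute your own model.
    #
    #   import numpy as np
    #   import onnx
    #   import caffe2.python.onnx.backend as backend
    #
    #   model = onnx.load("model.onnx")              # hypothetical file
    #   rep = backend.prepare(model, device="CPU")   # returns a Caffe2Rep
    #   outputs = rep.run(np.random.randn(1, 3, 224, 224).astype(np.float32))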
@classmethod
# TODO: This method needs a refactor for clarity
def _onnx_node_to_caffe2_op(cls, init_model, pred_model, node_def, opset_version):
cbackend = C.Caffe2Backend(cls._dummy_name)
if cbackend.support_onnx_import(node_def.op_type):
# extract value infos from pred model (value infos of
# node's inputs that are in init model should be all
# available in pred model)
value_infos = []
for name in node_def.input:
if pred_model is not None:
for vi in itertools.chain(pred_model.graph.input,
pred_model.graph.output,
pred_model.graph.value_info):
if vi.name == name:
value_infos.append(vi.SerializeToString())
op_strs = cbackend.convert_node(node_def.SerializeToString(), value_infos, opset_version)
init_ops = []
for s in op_strs[0]:
op = caffe2_pb2.OperatorDef()
op.ParseFromString(s)
init_ops.append(op)
ops = []
for s in op_strs[1]:
op = caffe2_pb2.OperatorDef()
op.ParseFromString(s)
ops.append(op)
return Caffe2Ops(ops, init_ops, [])
if node_def.op_type in cls._special_operators:
translator = getattr(cls, cls._special_operators[node_def.op_type])
else:
translator = cls._common_onnx_node_to_caffe2_op
ops = translator(init_model, pred_model, OnnxNode(node_def), opset_version)
if isinstance(ops, Caffe2Ops):
return ops
if not isinstance(ops, container_abcs.Iterable):
ops = [ops]
return Caffe2Ops(ops, [], [])
_broadcast_operators = {
'Add',
'Sub',
}
@classmethod
def _common_onnx_node_to_caffe2_op(cls, init_model, pred_model, onnx_node, opset_version):
"""
This translator performs the basic translation of ONNX nodes into
Caffe2 operators. Besides doing a straightforward marshalling from
one format to another, it also does these extra things:
- Renames operators based on '_renamed_operators'
- Renames attributes based on '_global_renamed_attrs' and
'_per_op_renamed_attrs'
If you're writing a custom translator, consider calling this first,
and then fixing things up further.
"""
c2_op = caffe2_pb2.OperatorDef()
c2_op.input.extend(onnx_node.inputs)
c2_op.output.extend(onnx_node.outputs)
c2_op.name = onnx_node.name
onnx_op_type = onnx_node.op_type
broken_version = cls._broken_operators.get(onnx_op_type, float('Inf'))
if broken_version <= opset_version:
raise ValueError(
"Don't know how to translate op {} in ONNX operator set v{} (I only support prior to v{})".format(onnx_op_type, opset_version, broken_version))
c2_op.type = cls._renamed_operators.get(onnx_op_type, onnx_op_type)
if not core.IsOperator(c2_op.type):
raise ValueError(
"Don't know how to translate op {}".format(onnx_op_type))
def kmap(k):
if (onnx_op_type in cls._per_op_renamed_attrs and
k in cls._per_op_renamed_attrs[onnx_op_type]):
return cls._per_op_renamed_attrs[onnx_op_type][k]
if k in cls._global_renamed_attrs:
return cls._global_renamed_attrs[k]
return k
c2_op.arg.extend(onnx_node.attrs.caffe2(kmap=kmap))
if opset_version < 7:
# onnx opset 7 and newest caffe2 have adopted full onnx broadcast semantics
# so we don't need this hack anymore
if c2_op.type in cls._broadcast_operators:
already_broadcast = False
for arg in c2_op.arg:
if arg.name == 'broadcast':
already_broadcast = True
if not already_broadcast:
c2_op.arg.extend([caffe2.python.utils.MakeArgument('broadcast', 1)])
return c2_op
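    # Sketch of the renaming behaviour described in the docstring above (not part
    # of the original source). Given an ONNX node such as
    #
    #   onnx.helper.make_node('Unsqueeze', inputs=['x'], outputs=['y'], axes=[1])
    #
    # this translator emits a Caffe2 OperatorDef of type 'ExpandDims' (via
    # _renamed_operators) whose 'axes' attribute is renamed to 'dims' (via
    # _per_op_renamed_attrs).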
@staticmethod
def _all_names_in_graph(graph):
if graph is None:
return set()
names = set()
names.update(value_info.name for value_info in graph.input)
names.update(value_info.name for value_info in graph.output)
for node in graph.node:
names.update(node.input)
names.update(node.output)
return names
@classmethod
def _graph_to_net(cls, onnx_graph, opset_version):
net = caffe2_pb2.NetDef()
for node in onnx_graph.node:
try:
c2ops = cls._onnx_node_to_caffe2_op(
None, None, node, opset_version)
except Exception as e:
print('ONNX FATAL:', e)
continue
net.op.extend(c2ops.init_ops)
net.op.extend(c2ops.ops)
net.external_input.extend(c2ops.interface_blobs)
net.external_output.extend(
value_info.name for value_info in onnx_graph.output)
net.external_input.extend(
value_info.name for value_info in onnx_graph.input)
return net
@classmethod
def _onnx_model_to_caffe2_net(cls, onnx_model, device, opset_version, include_initializers):
device_option = get_device_option(Device(device))
onnx_model = onnx.utils.polish_model(onnx_model)
init_model = cls.optimize_onnx(onnx_model, init=True)
pred_model = cls.optimize_onnx(onnx_model, predict=True)
init_net = caffe2_pb2.NetDef()
pred_net = caffe2_pb2.NetDef()
init_net.name = onnx_model.graph.name + '_init'
pred_net.name = onnx_model.graph.name + '_predict'
if include_initializers:
init_net.op.extend(cls._create_tensor_filling_op(tp) for tp in onnx_model.graph.initializer)
cls._dummy_name.reset(cls._all_names_in_graph(init_model.graph) | cls._all_names_in_graph(pred_model.graph))
errors = []
for net, model in ( (init_net, init_model), (pred_net, pred_model) ):
net.device_option.CopyFrom(device_option)
for node in model.graph.node:
try:
c2ops = cls._onnx_node_to_caffe2_op(
init_model, pred_model, node, opset_version)
except Exception as e:
msg = 'Error while processing node: {}. Exception: {}'.format(node, e)
errors.append(msg)
print('ONNX FATAL:', msg, file=sys.stderr)
continue
init_net.op.extend(c2ops.init_ops)
net.op.extend(c2ops.ops)
net.external_input.extend(c2ops.interface_blobs)
net.external_output.extend(
value_info.name for value_info in model.graph.output)
net.external_input.extend(
value_info.name for value_info in model.graph.input)
if len(errors) > 0:
raise RuntimeError(
"ONNX conversion failed, encountered {} errors:\n\n{}".format(
len(errors), "\n\n".join(errors)))
return init_net, pred_net
    # wrapper for backwards compatibility
@classmethod
def onnx_graph_to_caffe2_net(cls, model, device="CPU", opset_version=_known_opset_version):
return cls._onnx_model_to_caffe2_net(model, device=device, opset_version=opset_version, include_initializers=True)
@classmethod
def supports_device(cls, device_str):
device = Device(device_str)
if device.type == DeviceType.CPU:
return True
elif core.IsGPUDeviceType(device.type):
return workspace.has_gpu_support
return False
@classmethod
def is_compatible(cls, model, device='CPU', **kwargs):
if hasattr(super(Caffe2Backend, cls), 'is_compatible') \
and callable(super(Caffe2Backend, cls).is_compatible):
if not super(Caffe2Backend, cls).is_compatible(model, device, **kwargs):
return False
        # TODO: should have an unsupported list of operators, be optimistic for now
return True
prepare = Caffe2Backend.prepare
prepare_zip_archive = Caffe2Backend.prepare_zip_archive
run_node = Caffe2Backend.run_node
run_model = Caffe2Backend.run_model
supports_device = Caffe2Backend.supports_device # noqa
is_compatible = Caffe2Backend.is_compatible
| 42.279412
| 346
| 0.602062
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import collections
from subprocess import Popen, PIPE
import sys
import zipfile
import itertools
# vendored protobuf is loaded first. We can work around this by
# importing onnx first, which will cause it to go out and pick up the
# system protobuf.
import onnx.backend
import caffe2
from caffe2.python import core, workspace, rnn_cell, gru_cell
from caffe2.python.compatibility import container_abcs
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import caffe2.python.utils
import numpy as np
import onnx
from onnx import checker, GraphProto, TensorProto, AttributeProto, ModelProto
import onnx.numpy_helper
import onnx.defs
import onnx.optimizer
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict
from caffe2.python.onnx.workspace import Workspace
from caffe2.python.onnx.backend_rep import Caffe2Rep
from caffe2.python.onnx.backend_cpp_rep import Caffe2CppRep
import caffe2.python._import_c_extension as C
import warnings
def force_unicode(s):
try:
return s.decode('utf-8')
except AttributeError:
return s
def get_device_option(device):
m = {DeviceType.CPU: caffe2_pb2.CPU,
DeviceType.CUDA: workspace.GpuDeviceType}
return core.DeviceOption(m[device.type], device.device_id)
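# Quick sketch (not part of the original source): mapping backend device strings
# onto Caffe2 DeviceOptions.
#
#   get_device_option(Device('CPU'))       # CPU DeviceOption
#   get_device_option(Device('CUDA:0'))    # GPU 0, when Caffe2 is built with GPU support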
class OnnxAttributes(dict):
@staticmethod
def from_onnx(args):
d = OnnxAttributes()
for arg in args:
d[arg.name] = convertAttributeProto(arg)
return d
def caffe2(self, kmap=lambda k: k):
for k, v in self.items():
if kmap(k) != '':
yield caffe2.python.utils.MakeArgument(kmap(k), v)
# TODO: Move this into ONNX main library
def convertAttributeProto(onnx_arg):
if onnx_arg.HasField('f'):
return onnx_arg.f
elif onnx_arg.HasField('i'):
return onnx_arg.i
elif onnx_arg.HasField('s'):
return onnx_arg.s
elif onnx_arg.HasField('t'):
return onnx_arg.t # this is a proto!
elif onnx_arg.HasField('g'):
return Caffe2Backend._graph_to_net(onnx_arg.g, Caffe2Backend._known_opset_version)
elif len(onnx_arg.floats):
return list(onnx_arg.floats)
elif len(onnx_arg.ints):
return list(onnx_arg.ints)
elif len(onnx_arg.strings):
return list(onnx_arg.strings)
elif len(onnx_arg.graphs):
retval = []
# TODO: this doesn't work with RNN ops
for g in onnx_arg.graphs:
retval.append(Caffe2Backend._graph_to_net(g, Caffe2Backend._known_opset_version))
return retval
else:
raise ValueError("Unsupported ONNX attribute: {}".format(onnx_arg))
class OnnxNode(object):
def __init__(self, node):
self.name = str(node.name)
self.op_type = str(node.op_type)
self.attrs = OnnxAttributes.from_onnx(node.attribute)
self.inputs = list(node.input)
self.outputs = list(node.output)
Caffe2Ops = collections.namedtuple('Caffe2Ops', ['ops', 'init_ops', 'interface_blobs'])
class Caffe2Backend(Backend):
_known_opset_version = 9
_broken_operators = {
}
_renamed_operators = {
'GlobalMaxPool': 'MaxPool',
'GlobalAveragePool': 'AveragePool',
'Pad': 'PadImage',
'Neg': 'Negative',
'BatchNormalization': 'SpatialBN',
'InstanceNormalization': 'InstanceNorm',
'MatMul': 'BatchMatMul',
'Upsample': 'ResizeNearest',
'Identity': 'Copy',
'InstanceNormalization': 'InstanceNorm',
'Equal': 'EQ',
'Less': 'LT',
'Greater': 'GT',
'Unsqueeze': 'ExpandDims',
'Loop': 'ONNXWhile',
'Tile': 'NumpyTile',
'RandomNormal': 'GaussianFill',
'RandomUniform': 'UniformFill',
}
_global_renamed_attrs = {'kernel_shape': 'kernels'}
_per_op_renamed_attrs = {
'Squeeze': {'axes': 'dims'},
'Unsqueeze': {'axes': 'dims'},
'Transpose': {'perm': 'axes'},
'Upsample': {'mode': '',
'scales': ''},
'ConvTranspose': {'output_padding': 'adjs'},
'Selu': {'gamma': 'scale'},
'If': {'then_branch': 'then_net',
'else_branch': 'else_net'},
'RandomUniform': {'low': 'min',
'high': 'max'}
}
_special_operators = {
'LSTM': '_create_rnn_variant',
'GRU': '_create_rnn_variant',
'RNN': '_create_rnn_variant',
'Loop': '_create_loop',
'If': '_create_if',
'Upsample': '_create_upsample',
'RandomNormal': '_create_gaussian_fill'
}
_dummy_name = C.DummyName()
@classmethod
def dummy_name(cls):
return cls._dummy_name.new_dummy_name()
@classmethod
def run_node(cls, node, inputs, device='CPU', opset_version=_known_opset_version, outputs_info=None):
super(Caffe2Backend, cls).run_node(node, inputs, device=device,
outputs_info=outputs_info, opset_version=opset_version)
value_infos = []
device_option = get_device_option(Device(device))
ws = Workspace()
with core.DeviceScope(device_option): # temporary!
if isinstance(inputs, dict):
for key, value in inputs.items():
ws.FeedBlob(key, value)
value_infos.append(onnx.helper.make_tensor_value_info(
name=key,
elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
shape=value.shape).SerializeToString())
else:
assert len(node.input) == len(inputs), "{}: expected {} but got {}".format(
node.op_type, len(node.input), len(inputs))
for key, value in zip(node.input, inputs):
ws.FeedBlob(key, value)
value_infos.append(onnx.helper.make_tensor_value_info(
name=key,
elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
shape=value.shape).SerializeToString())
ops = []
cbackend = C.Caffe2Backend(cls._dummy_name)
ops_str = cbackend.convert_node(node.SerializeToString(), value_infos, opset_version)
for s in ops_str[0] + ops_str[1]:
op = caffe2_pb2.OperatorDef()
op.ParseFromString(s)
op.device_option.CopyFrom(device_option)
ops.append(op)
ws.RunOperatorsOnce(ops)
output_values = [ws.FetchBlob(name) for name in node.output]
return namedtupledict('Outputs', node.output)(*output_values)
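    # Illustrative single-node run (not part of the original source):
    #
    #   node = onnx.helper.make_node('Relu', inputs=['x'], outputs=['y'])
    #   x = np.random.randn(3, 4).astype(np.float32)
    #   outputs = Caffe2Backend.run_node(node, [x])
    #   # outputs.y now holds Relu applied to x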
@classmethod
def _create_tensor_filling_op(cls, onnx_tensor, name=None):
assert name or onnx_tensor.name
name = name or onnx_tensor.name
c2_op = caffe2_pb2.OperatorDef()
c2_values = c2_op.arg.add()
c2_values.name = "values"
def tensor2list(onnx_tensor):
# Use the onnx.numpy_helper because the data may be raw
return onnx.numpy_helper.to_array(onnx_tensor).flatten().tolist()
if onnx_tensor.data_type in [TensorProto.FLOAT]:
c2_op.type = 'GivenTensorFill'
c2_values.floats.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type in [TensorProto.DOUBLE]:
c2_op.type = 'GivenTensorDoubleFill'
c2_values.floats.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type in [TensorProto.INT64,
TensorProto.UINT32]:
c2_op.type = 'GivenTensorInt64Fill'
c2_values.ints.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type in [TensorProto.UINT8,
TensorProto.INT8,
TensorProto.UINT16,
TensorProto.INT16,
TensorProto.INT32]:
c2_op.type = 'GivenTensorIntFill'
c2_values.ints.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type == TensorProto.BOOL:
c2_op.type = 'GivenTensorBoolFill'
c2_values.ints.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type == TensorProto.STRING:
c2_op.type = 'GivenTensorStringFill'
c2_values.strings.extend(onnx_tensor.string_data)
else:
raise RuntimeError(
"unrecognized tensor type {}".format(onnx_tensor.data_type))
c2_shape = c2_op.arg.add()
c2_shape.name = "shape"
c2_shape.ints.extend(onnx_tensor.dims)
c2_op.output.append(name)
return c2_op
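    # Sketch of the mapping above (not part of the original source): a float ONNX
    # initializer such as
    #
    #   onnx.helper.make_tensor('W', TensorProto.FLOAT, dims=[2], vals=[1.0, 2.0])
    #
    # becomes a Caffe2 'GivenTensorFill' operator with arguments values=[1.0, 2.0]
    # and shape=[2], producing the blob 'W'.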
@classmethod
def _rnn_reform_weights(cls, reforms, name, hidden_size, init_net, gates, reorder_indices):
for name_from, name_to, do_concat, extra_dims in reforms:
gate_blobs = ['%s/%s_%s' % (name, prefix, name_to) for prefix in gates]
for i, x in enumerate(gate_blobs):
dim0 = i * hidden_size, (i+1) * hidden_size
starts, ends = zip(dim0, *extra_dims)
init_net.Slice(name_from, x, starts=starts, ends=ends)
if do_concat:
reordered_gate_blobs = [gate_blobs[i] for i in reorder_indices]
init_net.Concat(reordered_gate_blobs, ['%s/%s' % (name, name_to), cls.dummy_name()], axis=0)
@classmethod
def _make_rnn_direction(cls, input_blob, B, W, R, initial_states_and_names, sequence_lens,
pred_mh, init_net,
input_size, hidden_size, num_gates, direction_offset,
Bi, Br, W_, R_,
reform, make_cell, keep_outputs):
name = cls.dummy_name()
# input and recurrence biases are squashed together in onnx
# but not in caffe2
gates_hidden_size = num_gates * hidden_size
bias_offset = 2 * direction_offset * gates_hidden_size
weight_offset = direction_offset * gates_hidden_size
Bi = init_net.Slice(B, name + Bi,
starts=[bias_offset + 0 * gates_hidden_size],
ends =[bias_offset + 1 * gates_hidden_size])
Br = init_net.Slice(B, name + Br,
starts=[bias_offset + 1 * gates_hidden_size],
ends =[bias_offset + 2 * gates_hidden_size])
W_ = init_net.Slice(W, name + W_,
starts=[weight_offset + 0 * gates_hidden_size, 0],
ends =[weight_offset + 1 * gates_hidden_size,-1])
R_ = init_net.Slice(R, name + R_,
starts=[weight_offset + 0 * gates_hidden_size, 0],
ends =[weight_offset + 1 * gates_hidden_size,-1])
initial_states_sliced = []
for initial_state, name_suffix in initial_states_and_names:
initial_states_sliced.append(
pred_mh.net.Slice(initial_state, name + name_suffix,
starts=[direction_offset + 0, 0, 0],
ends =[direction_offset + 1,-1,-1]))
if direction_offset == 1:
if sequence_lens is not None:
seq_lens_for_reverse = sequence_lens
else:
input_shape = pred_mh.net.Shape(input_blob, name + '/input_shape')
batch_size = pred_mh.net.Slice(input_shape, name + '/batch_size_slice', starts=[1], ends=[2])
seq_len = pred_mh.net.Slice(input_shape, name + '/seq_len_slice', starts=[0], ends=[1])
dummy_sequence_lens = pred_mh.net.Tile([seq_len, batch_size], name + '/dummy_sequence_lens', axis=0)
pred_mh.net.Reshape(dummy_sequence_lens, [dummy_sequence_lens, cls.dummy_name()], shape=[-1])
seq_lens_for_reverse = pred_mh.net.Cast(dummy_sequence_lens, name + '/seq_lens_for_reverse', to=core.DataType.INT32)
reform(Bi, Br, W_, R_, name, hidden_size, init_net)
if direction_offset == 1:
input = pred_mh.net.ReversePackedSegs(
[input_blob, seq_lens_for_reverse], name + "/input-reversed")
else:
input = input_blob
outputs = keep_outputs(list(make_cell(
pred_mh,
input,
sequence_lens,
initial_states_sliced,
input_size,
hidden_size,
name,
drop_states=False,
forward_only=True,
)))
if direction_offset == 1:
outputs[0] = pred_mh.net.ReversePackedSegs(
[outputs[0], seq_lens_for_reverse], name + "/output-reversed")
return outputs
@classmethod
def _create_rnn_variant(cls, init_model, pred_model, n, opset_version):
assert init_model is not None, "cannot convert RNNs without access to the full model"
assert pred_model is not None, "cannot convert RNNs without access to the full model"
attrs = dict(n.attrs) # make a copy, which is safe to mutate
hidden_size = attrs.pop('hidden_size')
direction = force_unicode(attrs.pop('direction', 'forward'))
if n.op_type == 'RNN':
activation = force_unicode(attrs.pop('activations', ('tanh',))[0].lower())
elif n.op_type == 'GRU':
linear_before_reset = attrs.pop('linear_before_reset', 0)
assert not attrs, "unsupported RNN attributes: " + str(attrs.keys())
assert direction in ['forward', 'bidirectional'], "unsupported backwards RNN/GRU/LSTM"
if n.op_type in ['RNN', 'GRU']:
input_blob, W, R, B, sequence_lens, initial_h = n.inputs
elif n.op_type == 'LSTM':
input_blob, W, R, B, sequence_lens, initial_h, initial_c = n.inputs
if sequence_lens == "":
sequence_lens = None
for x in itertools.chain(init_model.graph.input,
init_model.graph.value_info,
pred_model.graph.input,
pred_model.graph.value_info):
if x.name == W:
input_size = x.type.tensor_type.shape.dim[2].dim_value
break
else:
raise RuntimeError("best-effort shape inference for RNN/GRU/LSTM failed")
pred_mh = ModelHelper()
init_net = core.Net("init-net")
init_net.Reshape(W, [W, cls.dummy_name()], shape=[1,-1,0])
init_net.Squeeze(W, W, dims=[0])
init_net.Reshape(R, [R, cls.dummy_name()], shape=[1,-1,0])
init_net.Squeeze(R, R, dims=[0])
init_net.Reshape(B, [B, cls.dummy_name()], shape=[1,-1])
init_net.Squeeze(B, B, dims=[0])
if n.op_type == 'RNN':
def reform(*args):
pass
def make_cell(*args, **kwargs):
return rnn_cell.BasicRNN(*args, activation=activation, **kwargs)
def make_rnn(direction_offset):
return cls._make_rnn_direction(
input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,
pred_mh, init_net, input_size, hidden_size, 1, direction_offset,
"/i2h_b", "/gates_t_b", "/i2h_w", "/gates_t_w",
reform, make_cell, lambda x: x)
elif n.op_type == 'GRU':
def reform(Bi, Br, W_, R_, name, hidden_size, init_net):
# caffe2 has a different order from onnx. We need to rearrange
# z r h -> r z h
reforms = ((W_, 'i2h_w', True, [(0,-1)]),
(R_, 'gate_t_w', False, [(0,-1)]),
(Bi, 'i2h_b', True, []),
(Br, 'gate_t_b', False, []))
cls._rnn_reform_weights(reforms, name, hidden_size, init_net,
['update', 'reset', 'output'], [1, 0, 2])
def make_cell(*args, **kwargs):
return gru_cell.GRU(*args, linear_before_reset=linear_before_reset, **kwargs)
def make_rnn(direction_offset):
return cls._make_rnn_direction(
input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,
pred_mh, init_net, input_size, hidden_size, 3, direction_offset,
"_bias_i2h", "_bias_gates", "/i2h_w_pre", "/gates_t_w_pre",
reform, make_cell, lambda x: x)
elif n.op_type == 'LSTM':
def reform(Bi, Br, W_, R_, name, hidden_size, init_net):
# caffe2 has a different order from onnx. We need to rearrange
# i o f c -> i f o c
reforms = ((W_, 'i2h_w', True, [(0, -1)]),
(R_, 'gates_t_w', True, [(0, -1)]),
(Bi, 'i2h_b' , True, []),
(Br, 'gates_t_b', True, []))
cls._rnn_reform_weights(reforms, name, hidden_size, init_net,
['input', 'output', 'forget', 'cell'], [0, 2, 1, 3])
def make_cell(*args, **kwargs):
return rnn_cell.LSTM(*args, **kwargs)
def make_rnn(direction_offset):
return cls._make_rnn_direction(
input_blob, B, W, R, [(initial_h, '/initial_h'), (initial_c, '/initial_c')], sequence_lens,
pred_mh, init_net, input_size, hidden_size, 4, direction_offset,
"/i2h_b", "/gates_t_b", "/i2h_w", "/gates_t_w",
reform, make_cell, lambda x: [x[0], x[1], x[3]])
if direction == 'forward':
outputs = make_rnn(0)
# in the forward case, storage is shared between the
# last outputs. We need to decouple them so that the
# VariableLengthSequencePadding only mutates
# n.outputs[0]
for i in range(1, len(outputs)):
pred_mh.net.Copy(outputs[i], n.outputs[i])
if sequence_lens is not None:
pred_mh.net.VariableLengthSequencePadding(
[outputs[0], sequence_lens], [outputs[0]])
pred_mh.net.ExpandDims([outputs[0]], [n.outputs[0]], dims=[1])
elif direction == 'bidirectional':
outputs_f = make_rnn(0)
outputs_b = make_rnn(1)
concatted_output, _ = pred_mh.net.Concat(
[outputs_f[0], outputs_b[0]], [cls.dummy_name(), cls.dummy_name()], axis=2)
if sequence_lens is not None:
pred_mh.net.VariableLengthSequencePadding(
[concatted_output, sequence_lens], [concatted_output])
reshaped_output, _ = pred_mh.net.Reshape(concatted_output, [cls.dummy_name(), cls.dummy_name()], shape=[0,0,-1,2])
pred_mh.net.Transpose(reshaped_output, n.outputs[0], axes=[0,2,1,3])
for i in range(1, len(n.outputs)):
pred_mh.net.Concat([outputs_f[i], outputs_b[i]],
[n.outputs[i], cls.dummy_name()], axis=0)
# We want to decide whether to put all of our weight-reshaping
# operators in the init net or the predict net. We can put
# them in the init net iff the inputs to those operators are
# already available, either as graph initializers, or as the
# output of other operators in the init net. The latter case
# occurs, for example, when exporting from pytorch to onnx.
# In most production use, we expect has_initializers to be
# true.
initializers = {i.name for i in init_model.graph.initializer}
outputs = {output for node in init_model.graph.node for output in node.output}
has_initializers = all(x in initializers or x in outputs for x in (W, R, B))
pred_ops = []
init_ops = []
(init_ops if has_initializers else pred_ops).extend(init_net.Proto().op)
pred_ops.extend(pred_mh.Proto().op)
return Caffe2Ops(pred_ops, init_ops, list(pred_mh.Proto().external_input))
@classmethod
def _create_control_op(cls, init_model, pred_model, n, opset_version):
control_inputs = []
if '__control_inputs' in n.attrs:
control_inputs.extend(n.attrs['__control_inputs'])
node = cls._common_onnx_node_to_caffe2_op(init_model, pred_model, n, opset_version)
node.control_input.extend(control_inputs)
return Caffe2Ops([node], [], [])
@classmethod
def _remove_ssa(cls, net, remap_dict):
for op in net.op:
for i, name in enumerate(op.output):
if name in remap_dict:
op.output[i] = remap_dict[name]
for i, out in enumerate(net.external_output):
if out in remap_dict:
net.external_output[i] = remap_dict[out]
@classmethod
def _create_if(cls, init_model, pred_model, n, opset_version):
ops = cls._create_control_op(init_model, pred_model, n, opset_version)
assert ops[0][0].type == 'If'
if_op = ops[0][0]
then_net = else_net = None
control_inputs = []
for arg in if_op.arg:
if arg.name == 'then_net':
then_net = arg.n
if arg.name == 'else_net':
else_net = arg.n
if arg.name == '__control_inputs':
control_inputs = arg.strings
assert then_net and else_net
then_net_outs = then_net.external_output
else_net_outs = else_net.external_output
op_outputs = if_op.output
assert len(then_net_outs) == len(else_net_outs)
assert len(else_net_outs) == len(op_outputs)
for arg in if_op.arg:
if arg.name == 'then_net':
arg.n.external_input.extend(control_inputs)
if arg.name == 'else_net':
arg.n.external_input.extend(control_inputs)
return ops
@classmethod
def _create_loop(cls, init_model, pred_model, n, opset_version):
ops = cls._create_control_op(init_model, pred_model, n, opset_version)
assert ops[0][0].type == 'ONNXWhile'
while_op = ops[0][0]
while_op.arg.extend([caffe2.python.utils.MakeArgument('has_trip_count', True)])
while_op.arg.extend([caffe2.python.utils.MakeArgument('has_cond', True)])
while_op.arg.extend([caffe2.python.utils.MakeArgument('disable_scopes', True)])
control_inputs = []
for arg in while_op.arg:
if arg.name == '__control_inputs':
control_inputs = arg.strings
num_loop_carried_deps = 0
for arg in while_op.arg:
if arg.name == 'body':
num_loop_carried_deps = len(arg.n.external_input) - 2
arg.n.external_input.extend(control_inputs)
while_op.arg.extend([
caffe2.python.utils.MakeArgument('num_loop_carried_deps',
num_loop_carried_deps)
])
return ops
@classmethod
def _substitute_raw_value(cls, tp, raw_values_dict):
if tp.HasField('raw_data') and tp.raw_data == bytes(b'__EXTERNAL'):
if tp.name not in raw_values_dict:
raise RuntimeError('TensorProto for value {} referenced raw data but it was not found!'.format(tp.name))
else:
tp.raw_data = raw_values_dict[tp.name]
@classmethod
def _visit_and_substitute_raw_values(cls, nodes, raw_values_dict):
for node in nodes:
for attr in node.attribute:
if attr.HasField('t'):
cls._substitute_raw_value(attr.t, raw_values_dict)
for t in attr.tensors:
cls._substitute_raw_value(t, raw_values_dict)
if attr.HasField('g'):
cls._visit_and_substitute_raw_values(attr.g.node, raw_values_dict)
for g in attr.graphs:
cls._visit_and_substitute_raw_values(g.node, raw_values_dict)
@classmethod
def _external_value_resolution_pass(cls, model, raw_values_dict):
for init in model.graph.initializer:
cls._substitute_raw_value(init, raw_values_dict)
cls._visit_and_substitute_raw_values(model.graph.node, raw_values_dict)
@classmethod
def _direct_initialize_parameters(cls, initializer, ws, device_option):
for tp in initializer:
ws.FeedBlob(tp.name, onnx.numpy_helper.to_array(tp), device_option)
@classmethod
def _direct_initialize_inputs(cls, inputs, initialized, ws, device_option):
for value_info in inputs:
if value_info.name in initialized:
continue
shape = list(d.dim_value for d in value_info.type.tensor_type.shape.dim)
ws.FeedBlob(
value_info.name,
np.ones(shape, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[value_info.type.tensor_type.elem_type]),
device_option)
@staticmethod
def optimize_onnx(input, init=False, predict=False):
passes = ['fuse_consecutive_transposes',
'eliminate_nop_transpose',
'fuse_transpose_into_gemm',
'lift_lexical_references']
if init:
passes.append('split_init')
if predict:
passes.append('split_predict')
out = onnx.optimizer.optimize(input, passes)
return out
@classmethod
def prepare_zip_archive(cls, file, device='CPU', **kwargs):
with zipfile.ZipFile(file, mode='r') as z:
with z.open('__MODEL_PROTO', 'r') as f:
                model = onnx.load(f)
            blob_names = set(z.namelist()) - {'__MODEL_PROTO'}
# TODO: make this more efficient
raw_values_dict = {}
for name in blob_names:
with z.open(name, 'r') as blob_file:
raw_values_dict[name] = blob_file.read()
return cls.prepare(model, device, raw_values_dict=raw_values_dict, **kwargs)
@classmethod
def prepare(cls, model, device='CPU', raw_values_dict=None, **kwargs):
if not kwargs.pop('no_check_UNSAFE', False):
super(Caffe2Backend, cls).prepare(model, device, **kwargs)
opset_version = None
for imp in model.opset_import:
if not imp.HasField("domain") or imp.domain == "":
opset_version = imp.version
if imp.version > cls._known_opset_version:
warnings.warn("This version of onnx-caffe2 targets ONNX operator set version {}, but the model we are trying to import uses version {}. We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail.".format(cls._known_opset_version, imp.version))
else:
warnings.warn("Unrecognized operator set {}".format(imp.domain))
if opset_version is None:
if model.ir_version >= 0x00000003:
raise RuntimeError("Model with IR version >= 3 did not specify ONNX operator set version (onnx-caffe2 requires it)")
else:
opset_version = 1
model = onnx.shape_inference.infer_shapes(model)
ws = Workspace()
device_option = get_device_option(Device(device))
init_net, predict_net = cls._onnx_model_to_caffe2_net(model, device, opset_version, False)
if raw_values_dict:
cls._external_value_resolution_pass(model, raw_values_dict)
# Directly load initializer data into blobs in workspace
cls._direct_initialize_parameters(
model.graph.initializer,
ws,
device_option,
)
initialized = {init.name for init in model.graph.initializer}
cls._direct_initialize_inputs(
model.graph.input,
initialized,
ws,
device_option,
)
uninitialized = [value_info.name for value_info in model.graph.input if value_info.name not in initialized]
retval = Caffe2Rep(init_net, predict_net, ws, uninitialized)
return retval
@classmethod
# TODO: This method needs a refactor for clarity
def _onnx_node_to_caffe2_op(cls, init_model, pred_model, node_def, opset_version):
cbackend = C.Caffe2Backend(cls._dummy_name)
if cbackend.support_onnx_import(node_def.op_type):
# extract value infos from pred model (value infos of
            # node's inputs that are in init model should be all available in pred model)
value_infos = []
for name in node_def.input:
if pred_model is not None:
for vi in itertools.chain(pred_model.graph.input,
pred_model.graph.output,
pred_model.graph.value_info):
if vi.name == name:
value_infos.append(vi.SerializeToString())
op_strs = cbackend.convert_node(node_def.SerializeToString(), value_infos, opset_version)
init_ops = []
for s in op_strs[0]:
op = caffe2_pb2.OperatorDef()
op.ParseFromString(s)
init_ops.append(op)
ops = []
for s in op_strs[1]:
op = caffe2_pb2.OperatorDef()
op.ParseFromString(s)
ops.append(op)
return Caffe2Ops(ops, init_ops, [])
if node_def.op_type in cls._special_operators:
translator = getattr(cls, cls._special_operators[node_def.op_type])
else:
translator = cls._common_onnx_node_to_caffe2_op
ops = translator(init_model, pred_model, OnnxNode(node_def), opset_version)
if isinstance(ops, Caffe2Ops):
return ops
if not isinstance(ops, container_abcs.Iterable):
ops = [ops]
return Caffe2Ops(ops, [], [])
_broadcast_operators = {
'Add',
'Sub',
}
@classmethod
def _common_onnx_node_to_caffe2_op(cls, init_model, pred_model, onnx_node, opset_version):
c2_op = caffe2_pb2.OperatorDef()
c2_op.input.extend(onnx_node.inputs)
c2_op.output.extend(onnx_node.outputs)
c2_op.name = onnx_node.name
onnx_op_type = onnx_node.op_type
broken_version = cls._broken_operators.get(onnx_op_type, float('Inf'))
if broken_version <= opset_version:
raise ValueError(
"Don't know how to translate op {} in ONNX operator set v{} (I only support prior to v{})".format(onnx_op_type, opset_version, broken_version))
c2_op.type = cls._renamed_operators.get(onnx_op_type, onnx_op_type)
if not core.IsOperator(c2_op.type):
raise ValueError(
"Don't know how to translate op {}".format(onnx_op_type))
def kmap(k):
if (onnx_op_type in cls._per_op_renamed_attrs and
k in cls._per_op_renamed_attrs[onnx_op_type]):
return cls._per_op_renamed_attrs[onnx_op_type][k]
if k in cls._global_renamed_attrs:
return cls._global_renamed_attrs[k]
return k
c2_op.arg.extend(onnx_node.attrs.caffe2(kmap=kmap))
if opset_version < 7:
if c2_op.type in cls._broadcast_operators:
already_broadcast = False
for arg in c2_op.arg:
if arg.name == 'broadcast':
already_broadcast = True
if not already_broadcast:
c2_op.arg.extend([caffe2.python.utils.MakeArgument('broadcast', 1)])
return c2_op
@staticmethod
def _all_names_in_graph(graph):
if graph is None:
return set()
names = set()
names.update(value_info.name for value_info in graph.input)
names.update(value_info.name for value_info in graph.output)
for node in graph.node:
names.update(node.input)
names.update(node.output)
return names
@classmethod
def _graph_to_net(cls, onnx_graph, opset_version):
net = caffe2_pb2.NetDef()
for node in onnx_graph.node:
try:
c2ops = cls._onnx_node_to_caffe2_op(
None, None, node, opset_version)
except Exception as e:
print('ONNX FATAL:', e)
continue
net.op.extend(c2ops.init_ops)
net.op.extend(c2ops.ops)
net.external_input.extend(c2ops.interface_blobs)
net.external_output.extend(
value_info.name for value_info in onnx_graph.output)
net.external_input.extend(
value_info.name for value_info in onnx_graph.input)
return net
@classmethod
def _onnx_model_to_caffe2_net(cls, onnx_model, device, opset_version, include_initializers):
device_option = get_device_option(Device(device))
onnx_model = onnx.utils.polish_model(onnx_model)
init_model = cls.optimize_onnx(onnx_model, init=True)
pred_model = cls.optimize_onnx(onnx_model, predict=True)
init_net = caffe2_pb2.NetDef()
pred_net = caffe2_pb2.NetDef()
init_net.name = onnx_model.graph.name + '_init'
pred_net.name = onnx_model.graph.name + '_predict'
if include_initializers:
init_net.op.extend(cls._create_tensor_filling_op(tp) for tp in onnx_model.graph.initializer)
cls._dummy_name.reset(cls._all_names_in_graph(init_model.graph) | cls._all_names_in_graph(pred_model.graph))
errors = []
for net, model in ( (init_net, init_model), (pred_net, pred_model) ):
net.device_option.CopyFrom(device_option)
for node in model.graph.node:
try:
c2ops = cls._onnx_node_to_caffe2_op(
init_model, pred_model, node, opset_version)
except Exception as e:
msg = 'Error while processing node: {}. Exception: {}'.format(node, e)
errors.append(msg)
print('ONNX FATAL:', msg, file=sys.stderr)
continue
init_net.op.extend(c2ops.init_ops)
net.op.extend(c2ops.ops)
net.external_input.extend(c2ops.interface_blobs)
net.external_output.extend(
value_info.name for value_info in model.graph.output)
net.external_input.extend(
value_info.name for value_info in model.graph.input)
if len(errors) > 0:
raise RuntimeError(
"ONNX conversion failed, encountered {} errors:\n\n{}".format(
len(errors), "\n\n".join(errors)))
return init_net, pred_net
    # wrapper for backwards compatibility
@classmethod
def onnx_graph_to_caffe2_net(cls, model, device="CPU", opset_version=_known_opset_version):
return cls._onnx_model_to_caffe2_net(model, device=device, opset_version=opset_version, include_initializers=True)
@classmethod
def supports_device(cls, device_str):
device = Device(device_str)
if device.type == DeviceType.CPU:
return True
elif core.IsGPUDeviceType(device.type):
return workspace.has_gpu_support
return False
@classmethod
def is_compatible(cls, model, device='CPU', **kwargs):
if hasattr(super(Caffe2Backend, cls), 'is_compatible') \
and callable(super(Caffe2Backend, cls).is_compatible):
if not super(Caffe2Backend, cls).is_compatible(model, device, **kwargs):
return False
        # TODO: should have an unsupported list of operators, be optimistic for now
return True
prepare = Caffe2Backend.prepare
prepare_zip_archive = Caffe2Backend.prepare_zip_archive
run_node = Caffe2Backend.run_node
run_model = Caffe2Backend.run_model
supports_device = Caffe2Backend.supports_device # noqa
is_compatible = Caffe2Backend.is_compatible
| true
| true
|
f7056f50f1feb79d81aa60f148cfd317c84f892f
| 11,513
|
py
|
Python
|
argocd_python_client/model/v1_event_list.py
|
RyanSiu1995/argocd-python-client
|
2e8f097fe09f247a46ac70692241a93d1acd076a
|
[
"MIT"
] | 1
|
2021-11-20T13:37:43.000Z
|
2021-11-20T13:37:43.000Z
|
argocd_python_client/model/v1_event_list.py
|
RyanSiu1995/argocd-python-client
|
2e8f097fe09f247a46ac70692241a93d1acd076a
|
[
"MIT"
] | null | null | null |
argocd_python_client/model/v1_event_list.py
|
RyanSiu1995/argocd-python-client
|
2e8f097fe09f247a46ac70692241a93d1acd076a
|
[
"MIT"
] | null | null | null |
"""
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argocd_python_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argocd_python_client.exceptions import ApiAttributeError
def lazy_import():
from argocd_python_client.model.v1_event import V1Event
from argocd_python_client.model.v1_list_meta import V1ListMeta
globals()['V1Event'] = V1Event
globals()['V1ListMeta'] = V1ListMeta
class V1EventList(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'items': ([V1Event],), # noqa: E501
'metadata': (V1ListMeta,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'items': 'items', # noqa: E501
'metadata': 'metadata', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""V1EventList - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
                                traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
items ([V1Event]): [optional] # noqa: E501
metadata (V1ListMeta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""V1EventList - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
                                traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
items ([V1Event]): [optional] # noqa: E501
metadata (V1ListMeta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 42.958955
| 121
| 0.572136
|
import re
import sys
from argocd_python_client.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argocd_python_client.exceptions import ApiAttributeError
def lazy_import():
from argocd_python_client.model.v1_event import V1Event
from argocd_python_client.model.v1_list_meta import V1ListMeta
globals()['V1Event'] = V1Event
globals()['V1ListMeta'] = V1ListMeta
class V1EventList(ModelNormal):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'items': ([V1Event],),
'metadata': (V1ListMeta,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'items': 'items',
'metadata': 'metadata',
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| true
| true
|
f705700579504459d1022712175c926bb0c492e4
| 226
|
py
|
Python
|
src/SimonGame.py
|
busvar/VuelingGame_Backend
|
2be1afd85e1247dcc91ad23acec233bbf34b9d5f
|
[
"MIT"
] | null | null | null |
src/SimonGame.py
|
busvar/VuelingGame_Backend
|
2be1afd85e1247dcc91ad23acec233bbf34b9d5f
|
[
"MIT"
] | null | null | null |
src/SimonGame.py
|
busvar/VuelingGame_Backend
|
2be1afd85e1247dcc91ad23acec233bbf34b9d5f
|
[
"MIT"
] | null | null | null |
from flask import Flask, request
import json
app = Flask(__name__)
@app.route('/')
def hello():
    outFile = {'Title' : "Simon Game", 'msg' : "Hello World!"}
outFile = json.dumps(outFile)
return json.loads(outFile)
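# Minimal usage sketch (not part of the original source): run the app locally,
# e.g. with `app.run()` or `flask run`, and request the root route:
#
#   curl http://127.0.0.1:5000/
#
# which returns the JSON document built in hello() above.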
| 22.6
| 63
| 0.659292
|
from flask import Flask, request
import json
app = Flask(__name__)
@app.route('/')
def hello():
    outFile = {'Title' : "Simon Game", 'msg' : "Hello World!"}
outFile = json.dumps(outFile)
return json.loads(outFile)
| true
| true
|
f70570d4cfa7f238e84d553a1dc8710c1e1855b3
| 15,880
|
py
|
Python
|
pru/db/geo/geo_operations.py
|
euctrl-pru/rt-python
|
da5d0040e250bd159845a0d43bf0b73eab368863
|
[
"MIT"
] | null | null | null |
pru/db/geo/geo_operations.py
|
euctrl-pru/rt-python
|
da5d0040e250bd159845a0d43bf0b73eab368863
|
[
"MIT"
] | null | null | null |
pru/db/geo/geo_operations.py
|
euctrl-pru/rt-python
|
da5d0040e250bd159845a0d43bf0b73eab368863
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
#
"""
Operations related to airspaces and intersections.
"""
from psycopg2 import Error, InternalError
from psycopg2.extensions import AsIs
from psycopg2.extras import DictCursor
from itertools import filterfalse
from functools import reduce
from shapely.wkt import loads
import pru.db.context as ctx
from pru.logger import logger
log = logger(__name__)
def make_point(lon, lat, connection):
"""
Makes a geo point
"""
cursor = connection.cursor()
query = "SELECT ST_MakePoint(%s, %s)"
params = (float(lon), float(lat))
cursor.execute(query, params)
return cursor.fetchone()
def make_augmented_point_from_position(position, flight_id, connection):
"""
    Takes a position tuple and makes an augmented point.
"""
point = make_point(position[1], position[0], connection)
return {'flight_id': flight_id, 'lon': position[1], 'lat': position[0],
'geoPoint': point}
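# Illustrative example (values assumed for clarity only):
# make_augmented_point_from_position((51.5, -0.1), 'F123', conn) returns a dict like
#   {'flight_id': 'F123', 'lon': -0.1, 'lat': 51.5, 'geoPoint': <ST_MakePoint(-0.1, 51.5) result>}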
def make_augmented_points_from_positions(latitudes, longitudes, flight_id, connection):
"""
Takes a list of latitudes and a list of longitudes and a flight_id.
Makes a list of augmented points.
"""
return [make_augmented_point_from_position(position, flight_id, connection) for position in zip(latitudes, longitudes)]
def extract_point_list_from_augmented_points(augmented_points):
"""
Given a list or generator of augmented points extract the geo point
representation as a list.
"""
return list(map(lambda augmented_points: augmented_points['geoPoint'],
augmented_points))
def make_line_from_augmented_points(augmented_points, flight_id, connection):
"""
Given a list of augmented points create a geographic line.
"""
if (len(augmented_points) == 0):
log.warning(f"Creating a line from a list of points but the list "
"was empty for flight id {flight_id}.")
return [[]]
cursor = connection.cursor()
query = "SELECT ST_AsEWKT(ST_MakeLine(ARRAY[%s]));"
params = [augmented_points]
cursor.execute(query, params)
return cursor.fetchone()
def find_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
"""
    Lists the ids and details of the airspaces that the given line string
    intersects, excluding those outside the altitude range of the trajectory.
"""
log.debug(f"Finding trajectory intersection with airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, av_airspace_id, min_altitude, max_altitude " \
"from %s.sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
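# Each row returned by find_sectors_intersected_by is a tuple of the selected
# columns (id, av_airspace_id, min_altitude, max_altitude); an illustrative
# (assumed) example row would be (123, 'EGTT01', 0, 19500).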
def find_user_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
"""
    Lists the uids and details of the user defined airspaces that the given
    line string intersects, excluding those outside the altitude range of the
    trajectory.
"""
log.debug(f"Finding trajectory intersection with user defined airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, org_id, min_altitude, max_altitude, user_id, " \
"sector_name from %s.user_defined_sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
def make_geographic_trajectory(augmented_points, flight_id, connection):
"""
Given a list of augmented points create a geographic line segment.
"""
log.debug(f"Making geo trajectory for flight id: {flight_id}")
return make_line_from_augmented_points(
extract_point_list_from_augmented_points(augmented_points),
flight_id,
connection)[0]
def make_augmented_trajectory(augmented_points, geographic_trajectory, flight_id, min_altitude, max_altitude, connection, is_user_defined=False):
"""
Makes a trajectory augmented with geographic positions and a list of sectors
intersected by the trajectory excluding those that do not meet the altitude range
of the trajectory.
"""
log.debug(f"Creating an augmented trajectory for flight id: {flight_id}")
if not is_user_defined:
sectors = find_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
else:
sectors = find_user_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
return {'extendedPoints': augmented_points,
'line': geographic_trajectory,
'sectors': sectors,
'is_user_defined': is_user_defined}
def find_sector(db_ID, connection):
schemaName = ctx.CONTEXT[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT id, av_airspace_id, av_icao_state_id, av_name, min_altitude, max_altitude FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchone()
def find_sector_identifiers(db_ID, context, connection):
"""
Finds the identifiers for a sector given the db id of the sector.
"""
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT av_airspace_id, av_icao_state_id, av_name FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchmany()
def find_airspace_by_database_ID(db_ID, context, connection, is_user_defined=False):
"""
    Finds an airspace with the given database id.
Returns a list, list may be empty.
"""
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
if is_user_defined:
cursor.execute("SELECT * FROM %s.user_defined_sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
else:
cursor.execute("SELECT * FROM %s.sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
def originates(first_point, polygon_string, flight_id, sector_id, connection):
"""
    If the first point is inside the given sector we determine that the
    trajectory originates in the sector.
    first_point is the wkb for the first point of the trajectory.
    Returns True if the trajectory originates in the sector.
"""
cursor = connection.cursor()
query = "SELECT ST_Intersects(%s::geography, %s::geography);"
params = [first_point, polygon_string]
cursor.execute(query, params)
originates = cursor.fetchone()[0]
if originates:
log.debug(f"Flight with id {flight_id} originates in sector {sector_id}")
return originates
def find_line_poly_intersection_without_boundary(lineString, polygonString, connection):
"""
Use the geo db to find the intersections between the linestring and the unbounded polygon string.
The polygon is assumed to _NOT_ have a boundary around it.
"""
query = "SELECT ST_AsText(ST_Intersection(%s::geography, ST_Force2D(ST_Boundary(%s))::geography));"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
                    'polygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_line_poly_intersection_with_boundary(lineString, polygonString, connection):
"""
Use the geo db to find the intersections between the linestring and the bounded polygon string.
The polygon is assumed to already have a boundary around it.
"""
query = "SELECT unit.find_intersections(%s, %s)"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
                    'polygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_intersections(augmented_trajectory, min_altitude, max_altitude, flight_id, connection):
"""
    Finds the points on the trajectory that intersect with the sectors of
    the augmented trajectory.
"""
log.debug(f"Finding intersection for flight id {flight_id}")
first_point = augmented_trajectory['extendedPoints'][0]['geoPoint']
first_point_lon = augmented_trajectory['extendedPoints'][0]['lon']
first_point_lat = augmented_trajectory['extendedPoints'][0]['lat']
is_user_defined = augmented_trajectory['is_user_defined']
# Find each sector
sector_IDs = [sector[0] for sector in augmented_trajectory['sectors']]
log.debug("Found sector ids %s", str(sector_IDs))
sectors = [find_airspace_by_database_ID(str(sector_id),
ctx.CONTEXT,
connection, is_user_defined)[0] for sector_id in sector_IDs]
# Find the points of the trajectory where the trajectory intersects
# with each sector
if is_user_defined:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'org_id': sector['org_id'],
'user_id': sector['user_id'],
'sector_name': sector['sector_name'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_cylinder': sector['is_cylinder'],
'is_user_defined': is_user_defined} for sector in sectors]
else:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'av_icao_state_id': sector['av_icao_state_id'],
'av_name': sector['av_name'],
'av_airspace_id': sector['av_airspace_id'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_user_defined': is_user_defined} for sector in sectors]
return segments
def extract(sector_id, shape, flight_id):
"""
    Given a shapely shape find if we have a point or a multipoint.
For a point extract the y, x pair as a list of one tuple of sector_id,
latitude and longitude.
For a multipoint return a list of multiple tuples.
"""
if shape.geom_type == 'MultiPoint':
return [(sector_id, p.y, p.x) for p in shape]
elif shape.geom_type == 'Point':
return [(sector_id, shape.y, shape.x)]
else:
log.debug("Unknown geom type : %s in flight id %s and sector_id %s, was %s, skipping", shape.geom_type, flight_id, sector_id, str(shape))
return []
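# Illustrative example: with shapely, extract(7, loads('POINT(2 1)'), 'F123')
# returns [(7, 1.0, 2.0)] because each tuple is (sector_id, shape.y, shape.x);
# the flight_id argument is only used for logging.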
def extract_details_from_intersection(sector_id, wkt, origin, flight_id):
"""
    Given an intersection wkt use shapely to create the point or multipoint
    object. Then extract the latitudes and longitudes from the (multi)point.
    Returns a list of tuples of sector_id, latitude and longitude.
"""
intersection_tuples = extract(sector_id, loads(wkt), flight_id)
if origin['is_origin']:
# If this sector is an origin sector, add in the lat lons at the start.
intersection_tuples = [(sector_id, origin['origin_lat'], origin['origin_lon'])] + intersection_tuples
return intersection_tuples
def make_sector_description(intersection, is_user_defined=False):
"""
Makes a text description of the sector from the intersection description
"""
if is_user_defined:
return f'{intersection["org_id"]}/{intersection["user_id"]}/{intersection["sector_name"]}'
else:
return f'{intersection["av_icao_state_id"]}/{intersection["av_name"]}/{intersection["id"]}/{intersection["av_airspace_id"]}'
def make_sector_identifier(intersection):
"""
Makes a text version of the database id in the given intersection
"""
return f'{intersection["id"]}'
def extract_intersection_wkts(intersections):
"""
Given a list of intersection dicts return a list of wkts with sector
descriptive text and the origin details as a tuple.
ie ("some-text-made-from-sector-ids", wkt, {is_origin:False, origin_lat:lat, origin_lon: lon})
"""
return [(make_sector_identifier(intersection),
intersection['intersections']['segmentStrings'][0][0], intersection['origin'])
for intersection in intersections]
def merge_l_t(l, lt):
"""
Merge a list of tuples lt, each of three values into three lists l.
For example: [('a', 'b', 'c'), ('a', 'd', 'e')] ->
[['a', 'a'], ['b', 'd'], ['c', 'e']]
"""
for t in lt:
l[0].append(t[1])
l[1].append(t[2])
l[2].append(t[0])
return l
def create_intersection_data_structure(intersections, flight_id):
"""
    Given the intersection data structures, create a response tuple of three
    parallel lists: (latitudes, longitudes, sector_ids).
"""
# The intersection wkts are tuples of the sector_id, the wkt and the origin
# status for the intersection.
intersection_wkts = extract_intersection_wkts(intersections)
intersection_details = [extract_details_from_intersection(*intersection_wkt, flight_id) for intersection_wkt in intersection_wkts]
x_y_sector_ids = reduce(merge_l_t, intersection_details, [[], [], []])
return x_y_sector_ids[0], x_y_sector_ids[1], x_y_sector_ids[2]
| 42.573727
| 145
| 0.647922
|
from psycopg2 import Error, InternalError
from psycopg2.extensions import AsIs
from psycopg2.extras import DictCursor
from itertools import filterfalse
from functools import reduce
from shapely.wkt import loads
import pru.db.context as ctx
from pru.logger import logger
log = logger(__name__)
def make_point(lon, lat, connection):
cursor = connection.cursor()
query = "SELECT ST_MakePoint(%s, %s)"
params = (float(lon), float(lat))
cursor.execute(query, params)
return cursor.fetchone()
def make_augmented_point_from_position(position, flight_id, connection):
point = make_point(position[1], position[0], connection)
return {'flight_id': flight_id, 'lon': position[1], 'lat': position[0],
'geoPoint': point}
def make_augmented_points_from_positions(latitudes, longitudes, flight_id, connection):
return [make_augmented_point_from_position(position, flight_id, connection) for position in zip(latitudes, longitudes)]
def extract_point_list_from_augmented_points(augmented_points):
return list(map(lambda augmented_points: augmented_points['geoPoint'],
augmented_points))
def make_line_from_augmented_points(augmented_points, flight_id, connection):
if (len(augmented_points) == 0):
log.warning(f"Creating a line from a list of points but the list "
"was empty for flight id {flight_id}.")
return [[]]
cursor = connection.cursor()
query = "SELECT ST_AsEWKT(ST_MakeLine(ARRAY[%s]));"
params = [augmented_points]
cursor.execute(query, params)
return cursor.fetchone()
def find_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
log.debug(f"Finding trajectory intersection with airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, av_airspace_id, min_altitude, max_altitude " \
"from %s.sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
def find_user_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
log.debug(f"Finding trajectory intersection with user defined airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, org_id, min_altitude, max_altitude, user_id, " \
"sector_name from %s.user_defined_sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
def make_geographic_trajectory(augmented_points, flight_id, connection):
log.debug(f"Making geo trajectory for flight id: {flight_id}")
return make_line_from_augmented_points(
extract_point_list_from_augmented_points(augmented_points),
flight_id,
connection)[0]
def make_augmented_trajectory(augmented_points, geographic_trajectory, flight_id, min_altitude, max_altitude, connection, is_user_defined=False):
log.debug(f"Creating an augmented trajectory for flight id: {flight_id}")
if not is_user_defined:
sectors = find_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
else:
sectors = find_user_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
return {'extendedPoints': augmented_points,
'line': geographic_trajectory,
'sectors': sectors,
'is_user_defined': is_user_defined}
def find_sector(db_ID, connection):
schemaName = ctx.CONTEXT[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT id, av_airspace_id, av_icao_state_id, av_name, min_altitude, max_altitude FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchone()
def find_sector_identifiers(db_ID, context, connection):
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT av_airspace_id, av_icao_state_id, av_name FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchmany()
def find_airspace_by_database_ID(db_ID, context, connection, is_user_defined=False):
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
if is_user_defined:
cursor.execute("SELECT * FROM %s.user_defined_sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
else:
cursor.execute("SELECT * FROM %s.sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
def originates(first_point, polygon_string, flight_id, sector_id, connection):
cursor = connection.cursor()
query = "SELECT ST_Intersects(%s::geography, %s::geography);"
params = [first_point, polygon_string]
cursor.execute(query, params)
originates = cursor.fetchone()[0]
if originates:
log.debug(f"Flight with id {flight_id} originates in sector {sector_id}")
return originates
def find_line_poly_intersection_without_boundary(lineString, polygonString, connection):
query = "SELECT ST_AsText(ST_Intersection(%s::geography, ST_Force2D(ST_Boundary(%s))::geography));"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
                    'polygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_line_poly_intersection_with_boundary(lineString, polygonString, connection):
query = "SELECT unit.find_intersections(%s, %s)"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
                    'polygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_intersections(augmented_trajectory, min_altitude, max_altitude, flight_id, connection):
log.debug(f"Finding intersection for flight id {flight_id}")
first_point = augmented_trajectory['extendedPoints'][0]['geoPoint']
first_point_lon = augmented_trajectory['extendedPoints'][0]['lon']
first_point_lat = augmented_trajectory['extendedPoints'][0]['lat']
is_user_defined = augmented_trajectory['is_user_defined']
sector_IDs = [sector[0] for sector in augmented_trajectory['sectors']]
log.debug("Found sector ids %s", str(sector_IDs))
sectors = [find_airspace_by_database_ID(str(sector_id),
ctx.CONTEXT,
connection, is_user_defined)[0] for sector_id in sector_IDs]
if is_user_defined:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'org_id': sector['org_id'],
'user_id': sector['user_id'],
'sector_name': sector['sector_name'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_cylinder': sector['is_cylinder'],
'is_user_defined': is_user_defined} for sector in sectors]
else:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'av_icao_state_id': sector['av_icao_state_id'],
'av_name': sector['av_name'],
'av_airspace_id': sector['av_airspace_id'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_user_defined': is_user_defined} for sector in sectors]
return segments
def extract(sector_id, shape, flight_id):
if shape.geom_type == 'MultiPoint':
return [(sector_id, p.y, p.x) for p in shape]
elif shape.geom_type == 'Point':
return [(sector_id, shape.y, shape.x)]
else:
log.debug("Unknown geom type : %s in flight id %s and sector_id %s, was %s, skipping", shape.geom_type, flight_id, sector_id, str(shape))
return []
def extract_details_from_intersection(sector_id, wkt, origin, flight_id):
intersection_tuples = extract(sector_id, loads(wkt), flight_id)
if origin['is_origin']:
intersection_tuples = [(sector_id, origin['origin_lat'], origin['origin_lon'])] + intersection_tuples
return intersection_tuples
def make_sector_description(intersection, is_user_defined=False):
if is_user_defined:
return f'{intersection["org_id"]}/{intersection["user_id"]}/{intersection["sector_name"]}'
else:
return f'{intersection["av_icao_state_id"]}/{intersection["av_name"]}/{intersection["id"]}/{intersection["av_airspace_id"]}'
def make_sector_identifier(intersection):
return f'{intersection["id"]}'
def extract_intersection_wkts(intersections):
return [(make_sector_identifier(intersection),
intersection['intersections']['segmentStrings'][0][0], intersection['origin'])
for intersection in intersections]
def merge_l_t(l, lt):
for t in lt:
l[0].append(t[1])
l[1].append(t[2])
l[2].append(t[0])
return l
def create_intersection_data_structure(intersections, flight_id):
intersection_wkts = extract_intersection_wkts(intersections)
intersection_details = [extract_details_from_intersection(*intersection_wkt, flight_id) for intersection_wkt in intersection_wkts]
x_y_sector_ids = reduce(merge_l_t, intersection_details, [[], [], []])
return x_y_sector_ids[0], x_y_sector_ids[1], x_y_sector_ids[2]
| true
| true
|
f70570d708510d481a042af8e412be25e905032d
| 11,930
|
py
|
Python
|
accel/cherry/tinygrad/ops_cherry.py
|
andreiaugustin/tinygrad
|
adaf17559564c75a35e901fc4f735c8cc46577d7
|
[
"MIT"
] | 5,578
|
2020-10-18T16:26:28.000Z
|
2022-03-31T18:31:04.000Z
|
accel/cherry/tinygrad/ops_cherry.py
|
JunnYu/tinygrad
|
c0c2c0b0414dec0862aa442c60e905f39958f572
|
[
"MIT"
] | 219
|
2020-10-18T19:50:39.000Z
|
2022-03-01T16:54:53.000Z
|
accel/cherry/tinygrad/ops_cherry.py
|
JunnYu/tinygrad
|
c0c2c0b0414dec0862aa442c60e905f39958f572
|
[
"MIT"
] | 746
|
2020-10-18T20:09:37.000Z
|
2022-03-30T10:11:46.000Z
|
import numpy as np
from tinygrad.tensor import Function
from extra.cherry import *
# ************* unary ops *************
class ReLU(Function):
def forward(ctx, input):
ctx.save_for_backward(input)
return cherry_unop(input, UnaryOps.RELU)
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return cherry_binop(grad_output, cherry_unop(input, UnaryOps.GT0), BinaryOps.MUL)
class Log(Function):
def forward(ctx, input):
ctx.save_for_backward(input)
return cherry_unop(input, UnaryOps.LOG)
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return cherry_binop(grad_output, input, BinaryOps.DIV)
class Exp(Function):
def forward(ctx, input):
ret = cherry_unop(input, UnaryOps.EXP)
ctx.save_for_backward(ret)
return ret
def backward(ctx, grad_output):
ret, = ctx.saved_tensors
return cherry_binop(grad_output, ret, BinaryOps.MUL)
# ************* reduce ops *************
class Sum(Function):
def forward(ctx, input, axis=None):
ctx.save_for_backward(input, axis)
return cherry_reduceop(input, ReduceOps.SUM, axis)
def backward(ctx, grad_output):
input, axis = ctx.saved_tensors
if isinstance(axis, int): axis = [axis]
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
return cherry_binop(grad_output.reshape(shape), np.zeros_like(input), BinaryOps.ADD)
class Max(Function):
def forward(ctx, inp, axis=None):
if isinstance(axis, int): axis = [axis]
#ret = np.amax(inp, axis=None if axis is None else tuple(axis), keepdims=True)
ret = cherry_reduceop(inp, ReduceOps.MAX, None if axis is None else tuple(axis), keepdims=True)
ctx.save_for_backward(inp, axis, ret)
if axis is not None:
ret = ret.reshape([inp.shape[i] for i in range(len(inp.shape)) if i not in axis])
return ret
def backward(ctx, grad_output):
input, axis, ret = ctx.saved_tensors
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
ret2 = (input==ret.reshape(shape))
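    # ret2 is a boolean mask of the entries that attained the max; the incoming
    # gradient is then split evenly between tied maxima (div counts the ties).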
#div = ret2.sum(axis=None if axis is None else tuple(axis), keepdims=True)
#return ret2*grad_output.reshape(shape)/div
div = cherry_reduceop(ret2, ReduceOps.SUM, axis=None if axis is None else tuple(axis), keepdims=True)
return cherry_binop(cherry_binop(ret2, grad_output.reshape(shape), BinaryOps.MUL), div, BinaryOps.DIV)
# ************* binary ops *************
def unbroadcast(out, in_sh):
# adjoint operation to broadcast is sum. Need to sum all axis with 1 = in_sh[i] < out.shape[i]
sum_axis = tuple([i for i in range(len(in_sh)) if in_sh[i]==1 and out.shape[i]>1]) if in_sh != (1,) else None
return cherry_reduceop(out, ReduceOps.SUM, sum_axis).reshape(in_sh)
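# Shape sketch (illustrative): if out has shape (4, 3) and in_sh == (1, 3),
# unbroadcast sums over axis 0 and reshapes back to (1, 3), which is the
# gradient of broadcasting (1, 3) up to (4, 3).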
class Add(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return cherry_binop(x, y, BinaryOps.ADD)
def backward(ctx, grad_output):
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_output, shape_x), unbroadcast(grad_output, shape_y)
class Sub(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return cherry_binop(x, y, BinaryOps.SUB)
def backward(ctx, grad_output):
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_output, shape_x), unbroadcast(-grad_output, shape_y)
class Mul(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return cherry_binop(x, y, BinaryOps.MUL)
def backward(ctx, grad_output):
x,y = ctx.saved_tensors
return unbroadcast(y*grad_output, x.shape), unbroadcast(x*grad_output, y.shape)
class Pow(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return cherry_binop(x, y, BinaryOps.POW)
def backward(ctx, grad_output):
x,y = ctx.saved_tensors
return unbroadcast(y * (x**(y-1.0)) * grad_output, x.shape), \
unbroadcast((x**y) * np.log(x) * grad_output, y.shape)
# ************* processing ops *************
class Matmul(Function):
def forward(ctx, input, weight):
ctx.save_for_backward(input, weight)
return cherry_matmul(input, weight)
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
grad_input = cherry_matmul(grad_output, weight, transpose_w=True)
grad_weight = cherry_matmul(input, grad_output, transpose_x=True)
return grad_input, grad_weight
class Conv2D(Function):
def forward(ctx, x, w, stride=1, groups=1):
if type(ctx.stride) == int:
ctx.stride = (ctx.stride, ctx.stride)
cout,cin,H,W = w.shape
ys,xs = ctx.stride
bs,cin_ = x.shape[0], x.shape[1]
iy,ix = x.shape[2],x.shape[3]
oy,ox = (x.shape[2]-(H-ys))//ys, (x.shape[3]-(W-xs))//xs
assert cin*ctx.groups == cin_
assert cout % ctx.groups == 0
rcout = cout//ctx.groups
# if H == 1 and W == 1 and ctx.groups == 1 and ctx.stride == (1,1):
gx = x.reshape(bs,ctx.groups,cin,x.shape[2],x.shape[3])
tx = np.lib.stride_tricks.as_strided(gx,
shape=(bs, ctx.groups, cin, oy, ox, H, W),
strides=(*gx.strides[0:3], gx.strides[3]*ys, gx.strides[4]*xs, *gx.strides[3:5]),
writeable=False,
)
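    # tx is a zero-copy sliding-window view of gx with shape
    # (bs, groups, cin, oy, ox, H, W): one (H, W) patch per output pixel,
    # strided by (ys, xs); no data is duplicated.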
tw = w.reshape(ctx.groups, rcout, cin, H, W)
ctx.save_for_backward(tx, tw, x.shape)
print((*gx.strides[0:3], gx.strides[3]*ys, gx.strides[4]*xs, *gx.strides[3:5]))
"""
ret = np.zeros((bs,ctx.groups,oy,ox,rcout),dtype=x.dtype)
for g in range(ctx.groups):
#ijYXyx,kjyx -> iYXk ->ikYX
ret[:,g] += np.tensordot(tx[:,g], tw[g], ((1,4,5),(1,2,3)))
print(bs, ctx.groups, cin)
return np.moveaxis(ret,4,2).reshape(bs, cout, oy, ox)
"""
cherry_dmar(SLOT(0), x) # bs, groups, cin, x.shape[2], x.shape[3]
cherry_dmar(SLOT(1), w) # groups, rcout, cin, H, W
cherry_reset_counts()
print(bs, ctx.groups, rcout, oy, ox, cin, H, W)
for B in range(0, bs):
if cin == 1 and rcout == 1 and ctx.groups > 1:
# hmm, this doesn't work, it's not a matmul
# you always have to loop over the groups, since they aren't joint
# the idea would be to collapse the HxW into the matmul, but you'd be limited to 9 for 3x3
# and while the load is easy in the weight matrix, it's hard in the image matrix (3 strides)
# and only the diagonal of the matrix would be useful! groups aren't channels!
# [(1, 144, 58, 58), (144, 1, 3, 3)] -> (1, 144, 56, 56)
# what does a grouped 1x1 conv look like?
# bs x groups x yx -- groups x 1 --> bs x groups x yx
# it looks like a broadcasted multiply
#print("opt1")
# x: bs x groups x iy x ix
# w: groups x H x W
# out: bs x groups x oy x ox
# ix x groups x groups
for g in range(0, groups, SZ):
for Y in range(0, oy):
for X in range(0, ox, SZ):
IY,IX = Y*ys,X*xs
riski_zero(Reg.MATMUL_ACC)
for y in range(IY, IY+H):
for x in range(IX, IX+W):
riski_load(Reg.MATMUL_INPUT,
SLOT(0) + B*groups*iy*ix + g*iy*ix + y*ix + x,
xs, iy*ix, min(SZ, ox-X), min(SZ, groups-g))
# 0 here is for broadcasting
riski_load(Reg.MATMUL_WEIGHTS,
SLOT(1) + g*H*W + (y-IY)*W + (x-IX),
0, H*W, SZ, min(SZ, groups-g))
riski_mulacc()
#risk_regdump()
riski_store(Reg.MATMUL_ACC,
SLOT(2) + B*groups*oy*ox + g*oy*ox + Y*ox + X,
1, oy*ox, min(SZ, ox-X), min(SZ, groups-g))
elif H == 1 and W == 1 and xs == 1 and ys == 1:
#print("opt2")
# oxy x cin x rcout -- unstrided 1x1
# this is a simple matmul
for g in range(0, groups):
for c in range(0, rcout, SZ):
yx = oy*ox
assert yx == iy*ix
for YX in range(0, oy*ox, SZ): # these are next to each other
# inner conv
riski_zero(Reg.MATMUL_ACC)
for ci in range(0, cin, SZ):
riski_load(Reg.MATMUL_INPUT,
SLOT(0) + B*groups*cin*yx + g*cin*yx + ci*yx + YX,
1, yx, min(SZ, yx-YX), min(SZ, cin-ci))
riski_load(Reg.MATMUL_WEIGHTS,
SLOT(1) + g*rcout*cin + c*cin + ci,
1, cin, min(SZ, cin-ci), min(SZ, rcout-c))
riski_matmul()
riski_store(Reg.MATMUL_ACC,
SLOT(2) + B*groups*rcout*yx + g*rcout*yx + c*yx + YX,
1, yx, min(SZ, yx-YX), min(SZ, rcout-c))
else:
#print("unoptimized")
# ox x cin x rcout -- unoptimized
for g in range(0, groups):
for c in range(0, rcout, SZ):
for Y in range(0, oy):
for X in range(0, ox, SZ):
IY,IX = Y*ys,X*xs
# inner conv
riski_zero(Reg.MATMUL_ACC)
for ci in range(0, cin, SZ):
# not a loop in 1x1 convs, 9 in 3x3, 25 in 5x5
for y in range(IY, IY+H):
for x in range(IX, IX+W):
riski_load(Reg.MATMUL_INPUT,
SLOT(0) + B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + y*ix + x,
xs, iy*ix, min(SZ, ox-X), min(SZ, cin-ci))
riski_load(Reg.MATMUL_WEIGHTS,
SLOT(1) + g*rcout*cin*H*W + c*cin*H*W + ci*H*W + (y-IY)*W + (x-IX),
H*W, cin*H*W, min(SZ, cin-ci), min(SZ, rcout-c))
riski_matmul()
riski_store(Reg.MATMUL_ACC,
SLOT(2) + B*groups*rcout*oy*ox + g*rcout*oy*ox + c*oy*ox + Y*ox + X,
1, oy*ox, min(SZ, ox-X), min(SZ, rcout-c))
cherry_print_counts()
#print(x.shape, w.shape, "->", ret.shape)
return cherry_dmaw(SLOT(2), (bs, cout, oy, ox))
def backward(ctx, grad_output):
bs,_,oy,ox = grad_output.shape
tx, tw, x_shape = ctx.saved_tensors
_,rcout,cin,H,W = tw.shape
ys,xs = ctx.stride
OY,OX = x_shape[2:4]
ggg = grad_output.reshape(bs,ctx.groups,rcout,oy,ox)
gdw = np.zeros((ctx.groups,rcout,cin,H,W), dtype=tx.dtype)
if cin >= 16:
# optimize for large channel count
for g in range(ctx.groups):
#'ikYX,ijYXyx -> kjyx'
for i in range(ggg[:,g].shape[1]):
for m in range(tx[:,g].shape[4]):
for n in range(tx[:,g].shape[5]):
# Use transposes to ensure reshape keeps the correct dimension (channel dimension) when multiple dimensions have the same size
big_matrix = np.transpose(tx[:,g][:, :, :, :, m, n], (1, 0, 2, 3)).reshape(tx[:,g].shape[1], -1).T
gdw[g][i, :, m, n] = cherry_matmul(ggg[:,g][:,i].reshape(1, -1), big_matrix).flatten()
else:
# unoptimized
for g in range(ctx.groups):
#'ikYX,ijYXyx -> kjyx'
for i in range(ggg[:,g].shape[1]):
for j in range(tx[:,g].shape[1]):
for m in range(tx[:,g].shape[4]):
big_matrix = tx[:,g][:,j, :, :, m].reshape(-1, tx[:,g].shape[5])
gdw[g][i, j, m] = cherry_matmul(ggg[:,g][:,i].reshape(1, -1), big_matrix).flatten()
# needs to be optimized separately for large oy and ox, versus large ctx.groups
gdx = np.zeros((bs,ctx.groups,cin,OY,OX), dtype=tx.dtype)
for k in range(oy*ox):
Y, X = k//ox, k%ox
iY,iX = Y*ys, X*xs
big_matrix = []
for g in range(ctx.groups):
big_matrix.append(cherry_matmul(ggg[:,g,:,Y,X].reshape(bs, -1), tw[g].reshape(rcout, -1)).reshape((bs, cin, H, W)))
gdx[:, :, :, iY:iY+H, iX:iX+W] = cherry_binop(gdx[:, :, :, iY:iY+H, iX:iX+W], np.array(np.transpose(big_matrix, (1, 0, 2, 3, 4))), BinaryOps.ADD)
return gdx.reshape((bs, ctx.groups*cin, OY, OX)), gdw.reshape((ctx.groups*rcout, cin, H, W))
| 39.766667
| 151
| 0.580553
|
import numpy as np
from tinygrad.tensor import Function
from extra.cherry import *
class ReLU(Function):
def forward(ctx, input):
ctx.save_for_backward(input)
return cherry_unop(input, UnaryOps.RELU)
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return cherry_binop(grad_output, cherry_unop(input, UnaryOps.GT0), BinaryOps.MUL)
class Log(Function):
def forward(ctx, input):
ctx.save_for_backward(input)
return cherry_unop(input, UnaryOps.LOG)
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return cherry_binop(grad_output, input, BinaryOps.DIV)
class Exp(Function):
def forward(ctx, input):
ret = cherry_unop(input, UnaryOps.EXP)
ctx.save_for_backward(ret)
return ret
def backward(ctx, grad_output):
ret, = ctx.saved_tensors
return cherry_binop(grad_output, ret, BinaryOps.MUL)
class Sum(Function):
def forward(ctx, input, axis=None):
ctx.save_for_backward(input, axis)
return cherry_reduceop(input, ReduceOps.SUM, axis)
def backward(ctx, grad_output):
input, axis = ctx.saved_tensors
if isinstance(axis, int): axis = [axis]
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
return cherry_binop(grad_output.reshape(shape), np.zeros_like(input), BinaryOps.ADD)
class Max(Function):
def forward(ctx, inp, axis=None):
if isinstance(axis, int): axis = [axis]
ret = cherry_reduceop(inp, ReduceOps.MAX, None if axis is None else tuple(axis), keepdims=True)
ctx.save_for_backward(inp, axis, ret)
if axis is not None:
ret = ret.reshape([inp.shape[i] for i in range(len(inp.shape)) if i not in axis])
return ret
def backward(ctx, grad_output):
input, axis, ret = ctx.saved_tensors
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
ret2 = (input==ret.reshape(shape))
div = cherry_reduceop(ret2, ReduceOps.SUM, axis=None if axis is None else tuple(axis), keepdims=True)
return cherry_binop(cherry_binop(ret2, grad_output.reshape(shape), BinaryOps.MUL), div, BinaryOps.DIV)
def unbroadcast(out, in_sh):
sum_axis = tuple([i for i in range(len(in_sh)) if in_sh[i]==1 and out.shape[i]>1]) if in_sh != (1,) else None
return cherry_reduceop(out, ReduceOps.SUM, sum_axis).reshape(in_sh)
class Add(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return cherry_binop(x, y, BinaryOps.ADD)
def backward(ctx, grad_output):
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_output, shape_x), unbroadcast(grad_output, shape_y)
class Sub(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return cherry_binop(x, y, BinaryOps.SUB)
def backward(ctx, grad_output):
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_output, shape_x), unbroadcast(-grad_output, shape_y)
class Mul(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return cherry_binop(x, y, BinaryOps.MUL)
def backward(ctx, grad_output):
x,y = ctx.saved_tensors
return unbroadcast(y*grad_output, x.shape), unbroadcast(x*grad_output, y.shape)
class Pow(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return cherry_binop(x, y, BinaryOps.POW)
def backward(ctx, grad_output):
x,y = ctx.saved_tensors
return unbroadcast(y * (x**(y-1.0)) * grad_output, x.shape), \
unbroadcast((x**y) * np.log(x) * grad_output, y.shape)
class Matmul(Function):
def forward(ctx, input, weight):
ctx.save_for_backward(input, weight)
return cherry_matmul(input, weight)
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
grad_input = cherry_matmul(grad_output, weight, transpose_w=True)
grad_weight = cherry_matmul(input, grad_output, transpose_x=True)
return grad_input, grad_weight
class Conv2D(Function):
def forward(ctx, x, w, stride=1, groups=1):
if type(ctx.stride) == int:
ctx.stride = (ctx.stride, ctx.stride)
cout,cin,H,W = w.shape
ys,xs = ctx.stride
bs,cin_ = x.shape[0], x.shape[1]
iy,ix = x.shape[2],x.shape[3]
oy,ox = (x.shape[2]-(H-ys))//ys, (x.shape[3]-(W-xs))//xs
assert cin*ctx.groups == cin_
assert cout % ctx.groups == 0
rcout = cout//ctx.groups
gx = x.reshape(bs,ctx.groups,cin,x.shape[2],x.shape[3])
tx = np.lib.stride_tricks.as_strided(gx,
shape=(bs, ctx.groups, cin, oy, ox, H, W),
strides=(*gx.strides[0:3], gx.strides[3]*ys, gx.strides[4]*xs, *gx.strides[3:5]),
writeable=False,
)
tw = w.reshape(ctx.groups, rcout, cin, H, W)
ctx.save_for_backward(tx, tw, x.shape)
print((*gx.strides[0:3], gx.strides[3]*ys, gx.strides[4]*xs, *gx.strides[3:5]))
cherry_dmar(SLOT(0), x)
cherry_dmar(SLOT(1), w)
cherry_reset_counts()
print(bs, ctx.groups, rcout, oy, ox, cin, H, W)
for B in range(0, bs):
if cin == 1 and rcout == 1 and ctx.groups > 1:
# the idea would be to collapse the HxW into the matmul, but you'd be limited to 9 for 3x3
# and only the diagonal of the matrix would be useful! groups aren't channels!
for g in range(0, groups, SZ):
for Y in range(0, oy):
for X in range(0, ox, SZ):
IY,IX = Y*ys,X*xs
riski_zero(Reg.MATMUL_ACC)
for y in range(IY, IY+H):
for x in range(IX, IX+W):
riski_load(Reg.MATMUL_INPUT,
SLOT(0) + B*groups*iy*ix + g*iy*ix + y*ix + x,
xs, iy*ix, min(SZ, ox-X), min(SZ, groups-g))
riski_load(Reg.MATMUL_WEIGHTS,
SLOT(1) + g*H*W + (y-IY)*W + (x-IX),
0, H*W, SZ, min(SZ, groups-g))
riski_mulacc()
riski_store(Reg.MATMUL_ACC,
SLOT(2) + B*groups*oy*ox + g*oy*ox + Y*ox + X,
1, oy*ox, min(SZ, ox-X), min(SZ, groups-g))
elif H == 1 and W == 1 and xs == 1 and ys == 1:
for g in range(0, groups):
for c in range(0, rcout, SZ):
yx = oy*ox
assert yx == iy*ix
for YX in range(0, oy*ox, SZ):
riski_zero(Reg.MATMUL_ACC)
for ci in range(0, cin, SZ):
riski_load(Reg.MATMUL_INPUT,
SLOT(0) + B*groups*cin*yx + g*cin*yx + ci*yx + YX,
1, yx, min(SZ, yx-YX), min(SZ, cin-ci))
riski_load(Reg.MATMUL_WEIGHTS,
SLOT(1) + g*rcout*cin + c*cin + ci,
1, cin, min(SZ, cin-ci), min(SZ, rcout-c))
riski_matmul()
riski_store(Reg.MATMUL_ACC,
SLOT(2) + B*groups*rcout*yx + g*rcout*yx + c*yx + YX,
1, yx, min(SZ, yx-YX), min(SZ, rcout-c))
else:
for g in range(0, groups):
for c in range(0, rcout, SZ):
for Y in range(0, oy):
for X in range(0, ox, SZ):
IY,IX = Y*ys,X*xs
riski_zero(Reg.MATMUL_ACC)
for ci in range(0, cin, SZ):
for y in range(IY, IY+H):
for x in range(IX, IX+W):
riski_load(Reg.MATMUL_INPUT,
SLOT(0) + B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + y*ix + x,
xs, iy*ix, min(SZ, ox-X), min(SZ, cin-ci))
riski_load(Reg.MATMUL_WEIGHTS,
SLOT(1) + g*rcout*cin*H*W + c*cin*H*W + ci*H*W + (y-IY)*W + (x-IX),
H*W, cin*H*W, min(SZ, cin-ci), min(SZ, rcout-c))
riski_matmul()
riski_store(Reg.MATMUL_ACC,
SLOT(2) + B*groups*rcout*oy*ox + g*rcout*oy*ox + c*oy*ox + Y*ox + X,
1, oy*ox, min(SZ, ox-X), min(SZ, rcout-c))
cherry_print_counts()
return cherry_dmaw(SLOT(2), (bs, cout, oy, ox))
def backward(ctx, grad_output):
bs,_,oy,ox = grad_output.shape
tx, tw, x_shape = ctx.saved_tensors
_,rcout,cin,H,W = tw.shape
ys,xs = ctx.stride
OY,OX = x_shape[2:4]
ggg = grad_output.reshape(bs,ctx.groups,rcout,oy,ox)
gdw = np.zeros((ctx.groups,rcout,cin,H,W), dtype=tx.dtype)
if cin >= 16:
for g in range(ctx.groups):
for i in range(ggg[:,g].shape[1]):
for m in range(tx[:,g].shape[4]):
for n in range(tx[:,g].shape[5]):
big_matrix = np.transpose(tx[:,g][:, :, :, :, m, n], (1, 0, 2, 3)).reshape(tx[:,g].shape[1], -1).T
gdw[g][i, :, m, n] = cherry_matmul(ggg[:,g][:,i].reshape(1, -1), big_matrix).flatten()
else:
for g in range(ctx.groups):
for i in range(ggg[:,g].shape[1]):
for j in range(tx[:,g].shape[1]):
for m in range(tx[:,g].shape[4]):
big_matrix = tx[:,g][:,j, :, :, m].reshape(-1, tx[:,g].shape[5])
gdw[g][i, j, m] = cherry_matmul(ggg[:,g][:,i].reshape(1, -1), big_matrix).flatten()
gdx = np.zeros((bs,ctx.groups,cin,OY,OX), dtype=tx.dtype)
for k in range(oy*ox):
Y, X = k//ox, k%ox
iY,iX = Y*ys, X*xs
big_matrix = []
for g in range(ctx.groups):
big_matrix.append(cherry_matmul(ggg[:,g,:,Y,X].reshape(bs, -1), tw[g].reshape(rcout, -1)).reshape((bs, cin, H, W)))
gdx[:, :, :, iY:iY+H, iX:iX+W] = cherry_binop(gdx[:, :, :, iY:iY+H, iX:iX+W], np.array(np.transpose(big_matrix, (1, 0, 2, 3, 4))), BinaryOps.ADD)
return gdx.reshape((bs, ctx.groups*cin, OY, OX)), gdw.reshape((ctx.groups*rcout, cin, H, W))
| true
| true
|
f705734671161b9c806e3992ccd70e77e6586843
| 3,536
|
py
|
Python
|
scripts/e78.py
|
JackKelly/neuralnilm_prototype
|
2119292e7d5c8a137797ad3c9abf9f37e7f749af
|
[
"MIT"
] | 38
|
2015-08-14T14:38:52.000Z
|
2021-12-15T03:21:04.000Z
|
scripts/e78.py
|
VidipG/neuralnilm_prototype
|
2119292e7d5c8a137797ad3c9abf9f37e7f749af
|
[
"MIT"
] | null | null | null |
scripts/e78.py
|
VidipG/neuralnilm_prototype
|
2119292e7d5c8a137797ad3c9abf9f37e7f749af
|
[
"MIT"
] | 26
|
2015-09-24T20:55:26.000Z
|
2021-12-07T15:42:09.000Z
|
from __future__ import print_function, division
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
"""
Setup:
* in_to_cell init weights are now Normal(1.0)
* output all appliances
* fix bug in RealApplianceSource
* use cross-entropy
* smaller network
* power targets
* trying without first two sigmoid layers.
* updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0
which fixes LSTM bug.
https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0
* Subsampling *bidirectional* LSTM
* Output every sequence in the batch
* Change W_in_to_cell from Normal(1.0) to Uniform(5)
* put back the two sigmoid layers
* use Conv1D to create a hierarchical subsampling LSTM
* Using LSTM (not BLSTM) to speed up training while testing
* Use dimshuffle not reshape
* 2 dense layers back
* back to default init
* conv between LSTMs.
* More data
* BLSTM
* Try just using a 1D convnet on input
* add second Convnet layer (not sure this is correct thing to do?)
* third conv layer
* large inits
* back to 2 conv layers
e70
* Based on e65
* Using sigmoid instead of rectify in Conv1D layers
e71
* Larger layers
* More data
e72
* Add a third conv layer
e73
* Add a dense layer after 3 conv layers
e74
* Removed dense layer after 3 conv layers (because it failed to learn anything)
* Trying standard inits for weights and biases throughout network.
e75
* Putting back large init for first layer
e76
* Removed 3rd conv layer
e77
* Try init Uniform(1)
e78
* Back to large inits for first layers
* Trying 3rd conv layer, also with large init
Results
"""
source = RealApplianceSource(
'/data/dk3810/ukdale.h5',
['fridge freezer', 'hair straighteners', 'television'],
max_input_power=1000, max_appliance_powers=[300, 500, 200],
window=("2013-06-01", "2014-07-01"),
output_one_appliance=False,
boolean_targets=False,
min_on_duration=60,
input_padding=8
)
net = Net(
experiment_name="e78",
source=source,
learning_rate=1e-1,
save_plot_interval=50,
loss_function=crossentropy,
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 50,
'filter_length': 3,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': Conv1DLayer,
'num_filters': 50,
'filter_length': 3,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': Conv1DLayer,
'num_filters': 50,
'filter_length': 5,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
net.print_net()
net.compile()
net.fit()
| 24.555556
| 92
| 0.633201
|
from __future__ import print_function, division
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
source = RealApplianceSource(
'/data/dk3810/ukdale.h5',
['fridge freezer', 'hair straighteners', 'television'],
max_input_power=1000, max_appliance_powers=[300, 500, 200],
window=("2013-06-01", "2014-07-01"),
output_one_appliance=False,
boolean_targets=False,
min_on_duration=60,
input_padding=8
)
net = Net(
experiment_name="e78",
source=source,
learning_rate=1e-1,
save_plot_interval=50,
loss_function=crossentropy,
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 50,
'filter_length': 3,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': Conv1DLayer,
'num_filters': 50,
'filter_length': 3,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': Conv1DLayer,
'num_filters': 50,
'filter_length': 5,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
net.print_net()
net.compile()
net.fit()
| true
| true
|
f70573dc0c48e48f0f19f0d85a70bb4837832aa3
| 278
|
py
|
Python
|
tests/core/slashing/msgs_test.py
|
yeeyangtee/terra-sdk-python
|
44e31290cfcb5563dd31a0d9c64c3ef2af72c0e2
|
[
"MIT"
] | 24
|
2021-05-30T05:48:33.000Z
|
2021-10-07T04:47:15.000Z
|
tests/core/slashing/msgs_test.py
|
yeeyangtee/terra-sdk-python
|
44e31290cfcb5563dd31a0d9c64c3ef2af72c0e2
|
[
"MIT"
] | 18
|
2021-05-30T09:05:26.000Z
|
2021-10-17T07:12:12.000Z
|
tests/core/slashing/msgs_test.py
|
yeeyangtee/terra-sdk-python
|
44e31290cfcb5563dd31a0d9c64c3ef2af72c0e2
|
[
"MIT"
] | 10
|
2021-02-11T00:56:04.000Z
|
2021-05-27T08:37:49.000Z
|
from terra_sdk.core.slashing import MsgUnjail
def test_deserializes_msg_unjail_examples(load_msg_examples):
examples = load_msg_examples(MsgUnjail.type, "./MsgUnjail.data.json")
for example in examples:
assert MsgUnjail.from_data(example).to_data() == example
| 34.75
| 73
| 0.780576
|
from terra_sdk.core.slashing import MsgUnjail
def test_deserializes_msg_unjail_examples(load_msg_examples):
examples = load_msg_examples(MsgUnjail.type, "./MsgUnjail.data.json")
for example in examples:
assert MsgUnjail.from_data(example).to_data() == example
| true
| true
|
f7057425a1dbc05857f4d03d917ef320093bcace
| 1,514
|
py
|
Python
|
server/whistle_server/endpoints/login.py
|
Sailer43/Whistle
|
fff23638e60a3c9d5e3ed16016b47bf93df51088
|
[
"MIT"
] | null | null | null |
server/whistle_server/endpoints/login.py
|
Sailer43/Whistle
|
fff23638e60a3c9d5e3ed16016b47bf93df51088
|
[
"MIT"
] | null | null | null |
server/whistle_server/endpoints/login.py
|
Sailer43/Whistle
|
fff23638e60a3c9d5e3ed16016b47bf93df51088
|
[
"MIT"
] | null | null | null |
from flask_restful import abort, Resource
from flask import request, g, session
from flask.json import jsonify
from whistle_server.models.user import User
def verify_password(password, hashed):
from werkzeug.security import check_password_hash
return check_password_hash(hashed, password)
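# verify_password defers to werkzeug's check_password_hash, which compares the
# supplied plaintext password against the stored salted hash.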
class LoginEndpoint(Resource):
def post(self):
username = request.json.get('username')
password = request.json.get('password')
# wrong input
if username is None or password is None:
abort(418)
user = User.find_by_username(username)
# user doesn't exist
if user is None:
return abort(418)
# wrong password
if not verify_password(password, user.obj["password_hash"]):
return abort(418)
session["_session"] = str(user.obj['_id'])
response = jsonify({
"user_id": str(user.obj["_id"])
})
response.status_code = 201
return response
class CreateUserEndpoint(Resource):
def post(self):
username = request.json.get('username')
password = request.json.get('password')
# wrong input
if username is None or password is None:
print('username or password is None')
abort(418)
user = User.create(username, password)
if user is None:
print('User was None')
abort(418)
response = jsonify({})
response.status_code = 200
return response
| 32.913043
| 68
| 0.619551
|
from flask_restful import abort, Resource
from flask import request, g, session
from flask.json import jsonify
from whistle_server.models.user import User
def verify_password(password, hashed):
from werkzeug.security import check_password_hash
return check_password_hash(hashed, password)
class LoginEndpoint(Resource):
def post(self):
username = request.json.get('username')
password = request.json.get('password')
if username is None or password is None:
abort(418)
user = User.find_by_username(username)
if user is None:
return abort(418)
# wrong password
if not verify_password(password, user.obj["password_hash"]):
return abort(418)
session["_session"] = str(user.obj['_id'])
response = jsonify({
"user_id": str(user.obj["_id"])
})
response.status_code = 201
return response
class CreateUserEndpoint(Resource):
def post(self):
username = request.json.get('username')
password = request.json.get('password')
# wrong input
if username is None or password is None:
print('username or password is None')
abort(418)
user = User.create(username, password)
if user is None:
print('User was None')
abort(418)
response = jsonify({})
response.status_code = 200
return response
| true
| true
|
f70574814793b965d57be8421a905d367cb6d3c4
| 1,962
|
py
|
Python
|
colour/models/tests/test_cam16_ucs.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/tests/test_cam16_ucs.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/tests/test_cam16_ucs.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.models.cam16_ucs` module.
"""
import unittest
from colour.models.tests.test_cam02_ucs import (
TestJMh_CIECAM02_to_UCS_Luo2006,
TestUCS_Luo2006_to_JMh_CIECAM02,
TestXYZ_to_UCS_Luo2006,
TestUCS_Luo2006_to_XYZ,
)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestJMh_CAM16_to_UCS_Li2017',
'TestUCS_Li2017_to_JMh_CAM16',
'TestXYZ_to_UCS_Li2017',
'TestUCS_Li2017_to_XYZ',
]
class TestJMh_CAM16_to_UCS_Li2017(TestJMh_CIECAM02_to_UCS_Luo2006):
"""
Defines :func:`colour.models.cam16_ucs.JMh_CAM16_to_UCS_Li2017`
definition unit tests methods.
Notes
-----
- :func:`colour.models.cam16_ucs.JMh_CAM16_to_UCS_Li2017` is a wrapper
of :func:`colour.models.cam02_ucs.JMh_CIECAM02_to_UCS_Luo2006` and thus
currently adopts the same unittests.
"""
class TestUCS_Li2017_to_JMh_CAM16(TestUCS_Luo2006_to_JMh_CIECAM02):
"""
Defines :func:`colour.models.cam16_ucs.UCS_Li2017_to_JMh_CAM16`
definition unit tests methods.
Notes
-----
- :func:`colour.models.cam16_ucs.UCS_Li2017_to_JMh_CAM16` is a wrapper
of :func:`colour.models.cam02_ucs.UCS_Luo2006_to_JMh_CIECAM02` and thus
currently adopts the same unittests.
"""
class TestXYZ_to_UCS_Li2017(TestXYZ_to_UCS_Luo2006):
"""
Defines :func:`colour.models.cam16_ucs.XYZ_to_UCS_Li2017`
definition unit tests methods.
"""
pass
class TestUCS_Li2017_to_XYZ(TestUCS_Luo2006_to_XYZ):
"""
Defines :func:`colour.models.cam16_ucs.UCS_Li2017_to_XYZ`
definition unit tests methods.
"""
pass
if __name__ == '__main__':
unittest.main()
| 25.815789
| 79
| 0.731397
|
import unittest
from colour.models.tests.test_cam02_ucs import (
TestJMh_CIECAM02_to_UCS_Luo2006,
TestUCS_Luo2006_to_JMh_CIECAM02,
TestXYZ_to_UCS_Luo2006,
TestUCS_Luo2006_to_XYZ,
)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestJMh_CAM16_to_UCS_Li2017',
'TestUCS_Li2017_to_JMh_CAM16',
'TestXYZ_to_UCS_Li2017',
'TestUCS_Li2017_to_XYZ',
]
class TestJMh_CAM16_to_UCS_Li2017(TestJMh_CIECAM02_to_UCS_Luo2006):
class TestUCS_Li2017_to_JMh_CAM16(TestUCS_Luo2006_to_JMh_CIECAM02):
class TestXYZ_to_UCS_Li2017(TestXYZ_to_UCS_Luo2006):
pass
class TestUCS_Li2017_to_XYZ(TestUCS_Luo2006_to_XYZ):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
f7057502a2b9e13dd28680ca2e93edeee96715c1
| 6,723
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/dictyosteliumdiscoideum.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/dictyosteliumdiscoideum.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/dictyosteliumdiscoideum.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Dictyostelium discoideum.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 18:15:05.559120
The undirected graph Dictyostelium discoideum has 10127 nodes and 1406097
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.02742 and has 103 connected components, where the component
with most nodes has 9898 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 167, the mean node degree is 277.69,
and the node degree mode is 1. The top 5 most central nodes are 44689.DDB0232950
(degree 2470), 44689.DDB0219986 (degree 2400), 44689.DDB0235316 (degree
2050), 44689.DDB0191503 (degree 2034) and 44689.DDB0235320 (degree 2018).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import DictyosteliumDiscoideum
# Then load the graph
graph = DictyosteliumDiscoideum()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def DictyosteliumDiscoideum(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Dictyostelium discoideum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of the Dictyostelium discoideum graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 18:15:05.559120
The undirected graph Dictyostelium discoideum has 10127 nodes and 1406097
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.02742 and has 103 connected components, where the component
with most nodes has 9898 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 167, the mean node degree is 277.69,
and the node degree mode is 1. The top 5 most central nodes are 44689.DDB0232950
(degree 2470), 44689.DDB0219986 (degree 2400), 44689.DDB0235316 (degree
2050), 44689.DDB0191503 (degree 2034) and 44689.DDB0235320 (degree 2018).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import DictyosteliumDiscoideum
# Then load the graph
graph = DictyosteliumDiscoideum()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="DictyosteliumDiscoideum",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.571429
| 223
| 0.70534
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def DictyosteliumDiscoideum(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
return AutomaticallyRetrievedGraph(
graph_name="DictyosteliumDiscoideum",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true
| true
|
f7057539fa1021d2f984df54ff3175f6e077ac81
| 8,191
|
py
|
Python
|
starlette/staticfiles.py
|
krish-adi/starlette
|
7c7ec5a7f72de360bafa938d14e2e1d6f4b6cb69
|
[
"BSD-3-Clause"
] | 6,974
|
2018-06-25T13:56:49.000Z
|
2022-03-31T21:33:04.000Z
|
starlette/staticfiles.py
|
krish-adi/starlette
|
7c7ec5a7f72de360bafa938d14e2e1d6f4b6cb69
|
[
"BSD-3-Clause"
] | 1,221
|
2018-06-25T15:31:07.000Z
|
2022-03-31T09:14:59.000Z
|
starlette/staticfiles.py
|
krish-adi/starlette
|
7c7ec5a7f72de360bafa938d14e2e1d6f4b6cb69
|
[
"BSD-3-Clause"
] | 810
|
2018-06-25T16:07:52.000Z
|
2022-03-30T16:34:12.000Z
|
import importlib.util
import os
import stat
import typing
from email.utils import parsedate
import anyio
from starlette.datastructures import URL, Headers
from starlette.exceptions import HTTPException
from starlette.responses import FileResponse, RedirectResponse, Response
from starlette.types import Receive, Scope, Send
PathLike = typing.Union[str, "os.PathLike[str]"]
class NotModifiedResponse(Response):
NOT_MODIFIED_HEADERS = (
"cache-control",
"content-location",
"date",
"etag",
"expires",
"vary",
)
def __init__(self, headers: Headers):
super().__init__(
status_code=304,
headers={
name: value
for name, value in headers.items()
if name in self.NOT_MODIFIED_HEADERS
},
)
class StaticFiles:
def __init__(
self,
*,
directory: PathLike = None,
packages: typing.List[str] = None,
html: bool = False,
check_dir: bool = True,
) -> None:
self.directory = directory
self.packages = packages
self.all_directories = self.get_directories(directory, packages)
self.html = html
self.config_checked = False
if check_dir and directory is not None and not os.path.isdir(directory):
raise RuntimeError(f"Directory '{directory}' does not exist")
def get_directories(
self, directory: PathLike = None, packages: typing.List[str] = None
) -> typing.List[PathLike]:
"""
Given `directory` and `packages` arguments, return a list of all the
        directories from which static files should be served.
"""
directories = []
if directory is not None:
directories.append(directory)
for package in packages or []:
spec = importlib.util.find_spec(package)
assert spec is not None, f"Package {package!r} could not be found."
assert (
spec.origin is not None
), f"Directory 'statics' in package {package!r} could not be found."
package_directory = os.path.normpath(
os.path.join(spec.origin, "..", "statics")
)
assert os.path.isdir(
package_directory
), f"Directory 'statics' in package {package!r} could not be found."
directories.append(package_directory)
return directories
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
"""
The ASGI entry point.
"""
assert scope["type"] == "http"
if not self.config_checked:
await self.check_config()
self.config_checked = True
path = self.get_path(scope)
response = await self.get_response(path, scope)
await response(scope, receive, send)
def get_path(self, scope: Scope) -> str:
"""
Given the ASGI scope, return the `path` string to serve up,
        with OS-specific path separators, and any '..' or '.' components removed.
"""
return os.path.normpath(os.path.join(*scope["path"].split("/")))
async def get_response(self, path: str, scope: Scope) -> Response:
"""
Returns an HTTP response, given the incoming path, method and request headers.
"""
if scope["method"] not in ("GET", "HEAD"):
raise HTTPException(status_code=405)
try:
full_path, stat_result = await anyio.to_thread.run_sync(
self.lookup_path, path
)
except PermissionError:
raise HTTPException(status_code=401)
except OSError:
raise
if stat_result and stat.S_ISREG(stat_result.st_mode):
# We have a static file to serve.
return self.file_response(full_path, stat_result, scope)
elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:
# We're in HTML mode, and have got a directory URL.
# Check if we have 'index.html' file to serve.
index_path = os.path.join(path, "index.html")
full_path, stat_result = await anyio.to_thread.run_sync(
self.lookup_path, index_path
)
if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
if not scope["path"].endswith("/"):
# Directory URLs should redirect to always end in "/".
url = URL(scope=scope)
url = url.replace(path=url.path + "/")
return RedirectResponse(url=url)
return self.file_response(full_path, stat_result, scope)
if self.html:
# Check for '404.html' if we're in HTML mode.
full_path, stat_result = await anyio.to_thread.run_sync(
self.lookup_path, "404.html"
)
if stat_result and stat.S_ISREG(stat_result.st_mode):
return FileResponse(
full_path,
stat_result=stat_result,
method=scope["method"],
status_code=404,
)
raise HTTPException(status_code=404)
def lookup_path(
self, path: str
) -> typing.Tuple[str, typing.Optional[os.stat_result]]:
for directory in self.all_directories:
full_path = os.path.realpath(os.path.join(directory, path))
directory = os.path.realpath(directory)
if os.path.commonprefix([full_path, directory]) != directory:
# Don't allow misbehaving clients to break out of the static files
# directory.
continue
try:
return full_path, os.stat(full_path)
except (FileNotFoundError, NotADirectoryError):
continue
return "", None
def file_response(
self,
full_path: PathLike,
stat_result: os.stat_result,
scope: Scope,
status_code: int = 200,
) -> Response:
method = scope["method"]
request_headers = Headers(scope=scope)
response = FileResponse(
full_path, status_code=status_code, stat_result=stat_result, method=method
)
if self.is_not_modified(response.headers, request_headers):
return NotModifiedResponse(response.headers)
return response
async def check_config(self) -> None:
"""
Perform a one-off configuration check that StaticFiles is actually
pointed at a directory, so that we can raise loud errors rather than
just returning 404 responses.
"""
if self.directory is None:
return
try:
stat_result = await anyio.to_thread.run_sync(os.stat, self.directory)
except FileNotFoundError:
raise RuntimeError(
f"StaticFiles directory '{self.directory}' does not exist."
)
if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
raise RuntimeError(
f"StaticFiles path '{self.directory}' is not a directory."
)
def is_not_modified(
self, response_headers: Headers, request_headers: Headers
) -> bool:
"""
Given the request and response headers, return `True` if an HTTP
"Not Modified" response could be returned instead.
"""
try:
if_none_match = request_headers["if-none-match"]
etag = response_headers["etag"]
if if_none_match == etag:
return True
except KeyError:
pass
try:
if_modified_since = parsedate(request_headers["if-modified-since"])
last_modified = parsedate(response_headers["last-modified"])
if (
if_modified_since is not None
and last_modified is not None
and if_modified_since >= last_modified
):
return True
except KeyError:
pass
return False
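# Editor's addition: an illustrative sketch (not part of the original module)
# of how StaticFiles is typically mounted in an application; the "static"
# directory name here is an assumption for the example.
def _example_app():  # pragma: no cover - documentation sketch only
    from starlette.applications import Starlette
    from starlette.routing import Mount
    return Starlette(routes=[
        Mount("/static", app=StaticFiles(directory="static"), name="static"),
    ])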
| 35.154506
| 88
| 0.583201
|
import importlib.util
import os
import stat
import typing
from email.utils import parsedate
import anyio
from starlette.datastructures import URL, Headers
from starlette.exceptions import HTTPException
from starlette.responses import FileResponse, RedirectResponse, Response
from starlette.types import Receive, Scope, Send
PathLike = typing.Union[str, "os.PathLike[str]"]
class NotModifiedResponse(Response):
NOT_MODIFIED_HEADERS = (
"cache-control",
"content-location",
"date",
"etag",
"expires",
"vary",
)
def __init__(self, headers: Headers):
super().__init__(
status_code=304,
headers={
name: value
for name, value in headers.items()
if name in self.NOT_MODIFIED_HEADERS
},
)
class StaticFiles:
def __init__(
self,
*,
directory: PathLike = None,
packages: typing.List[str] = None,
html: bool = False,
check_dir: bool = True,
) -> None:
self.directory = directory
self.packages = packages
self.all_directories = self.get_directories(directory, packages)
self.html = html
self.config_checked = False
if check_dir and directory is not None and not os.path.isdir(directory):
raise RuntimeError(f"Directory '{directory}' does not exist")
def get_directories(
self, directory: PathLike = None, packages: typing.List[str] = None
) -> typing.List[PathLike]:
directories = []
if directory is not None:
directories.append(directory)
for package in packages or []:
spec = importlib.util.find_spec(package)
assert spec is not None, f"Package {package!r} could not be found."
assert (
spec.origin is not None
), f"Directory 'statics' in package {package!r} could not be found."
package_directory = os.path.normpath(
os.path.join(spec.origin, "..", "statics")
)
assert os.path.isdir(
package_directory
), f"Directory 'statics' in package {package!r} could not be found."
directories.append(package_directory)
return directories
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
assert scope["type"] == "http"
if not self.config_checked:
await self.check_config()
self.config_checked = True
path = self.get_path(scope)
response = await self.get_response(path, scope)
await response(scope, receive, send)
def get_path(self, scope: Scope) -> str:
return os.path.normpath(os.path.join(*scope["path"].split("/")))
async def get_response(self, path: str, scope: Scope) -> Response:
if scope["method"] not in ("GET", "HEAD"):
raise HTTPException(status_code=405)
try:
full_path, stat_result = await anyio.to_thread.run_sync(
self.lookup_path, path
)
except PermissionError:
raise HTTPException(status_code=401)
except OSError:
raise
if stat_result and stat.S_ISREG(stat_result.st_mode):
return self.file_response(full_path, stat_result, scope)
elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:
# Check if we have 'index.html' file to serve.
index_path = os.path.join(path, "index.html")
full_path, stat_result = await anyio.to_thread.run_sync(
self.lookup_path, index_path
)
if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
if not scope["path"].endswith("/"):
# Directory URLs should redirect to always end in "/".
url = URL(scope=scope)
url = url.replace(path=url.path + "/")
return RedirectResponse(url=url)
return self.file_response(full_path, stat_result, scope)
if self.html:
# Check for '404.html' if we're in HTML mode.
full_path, stat_result = await anyio.to_thread.run_sync(
self.lookup_path, "404.html"
)
if stat_result and stat.S_ISREG(stat_result.st_mode):
return FileResponse(
full_path,
stat_result=stat_result,
method=scope["method"],
status_code=404,
)
raise HTTPException(status_code=404)
def lookup_path(
self, path: str
) -> typing.Tuple[str, typing.Optional[os.stat_result]]:
for directory in self.all_directories:
full_path = os.path.realpath(os.path.join(directory, path))
directory = os.path.realpath(directory)
if os.path.commonprefix([full_path, directory]) != directory:
continue
try:
return full_path, os.stat(full_path)
except (FileNotFoundError, NotADirectoryError):
continue
return "", None
def file_response(
self,
full_path: PathLike,
stat_result: os.stat_result,
scope: Scope,
status_code: int = 200,
) -> Response:
method = scope["method"]
request_headers = Headers(scope=scope)
response = FileResponse(
full_path, status_code=status_code, stat_result=stat_result, method=method
)
if self.is_not_modified(response.headers, request_headers):
return NotModifiedResponse(response.headers)
return response
async def check_config(self) -> None:
if self.directory is None:
return
try:
stat_result = await anyio.to_thread.run_sync(os.stat, self.directory)
except FileNotFoundError:
raise RuntimeError(
f"StaticFiles directory '{self.directory}' does not exist."
)
if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
raise RuntimeError(
f"StaticFiles path '{self.directory}' is not a directory."
)
def is_not_modified(
self, response_headers: Headers, request_headers: Headers
) -> bool:
try:
if_none_match = request_headers["if-none-match"]
etag = response_headers["etag"]
if if_none_match == etag:
return True
except KeyError:
pass
try:
if_modified_since = parsedate(request_headers["if-modified-since"])
last_modified = parsedate(response_headers["last-modified"])
if (
if_modified_since is not None
and last_modified is not None
and if_modified_since >= last_modified
):
return True
except KeyError:
pass
return False
| true
| true
|
f705756c9c7aa72e798711682e387825c99eda61
| 25,822
|
py
|
Python
|
barbican/plugin/interface/secret_store.py
|
lingxiankong/barbican
|
2d2376397d01b26ac2d98c0e02b67dfa0ecc2b1c
|
[
"Apache-2.0"
] | null | null | null |
barbican/plugin/interface/secret_store.py
|
lingxiankong/barbican
|
2d2376397d01b26ac2d98c0e02b67dfa0ecc2b1c
|
[
"Apache-2.0"
] | null | null | null |
barbican/plugin/interface/secret_store.py
|
lingxiankong/barbican
|
2d2376397d01b26ac2d98c0e02b67dfa0ecc2b1c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_config import cfg
import six
from stevedore import named
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
from barbican.plugin.util import multiple_backends
from barbican.plugin.util import utils as plugin_utils
_SECRET_STORE = None
CONF = config.new_config()
DEFAULT_PLUGIN_NAMESPACE = 'barbican.secretstore.plugin'
DEFAULT_PLUGINS = ['store_crypto']
store_opt_group = cfg.OptGroup(name='secretstore',
title='Secret Store Plugin Options')
store_opts = [
cfg.StrOpt('namespace',
default=DEFAULT_PLUGIN_NAMESPACE,
help=u._('Extension namespace to search for plugins.')
),
cfg.MultiStrOpt('enabled_secretstore_plugins',
default=DEFAULT_PLUGINS,
help=u._('List of secret store plugins to load.')
),
cfg.BoolOpt('enable_multiple_secret_stores',
default=False,
help=u._('Flag to enable multiple secret store plugin'
' backend support. Default is False')
),
cfg.ListOpt('stores_lookup_suffix',
help=u._('List of suffix to use for looking up plugins which '
'are supported with multiple backend support.')
)
]
CONF.register_group(store_opt_group)
CONF.register_opts(store_opts, group=store_opt_group)
config.parse_args(CONF)
config.set_module_config("secretstore", CONF)
def list_opts():
yield store_opt_group, store_opts
class SecretStorePluginNotFound(exception.BarbicanHTTPException):
"""Raised when no plugins are installed."""
client_message = u._("No plugin was found that could support your request")
status_code = 400
def __init__(self, plugin_name=None):
if plugin_name:
message = u._('Secret store plugin "{name}"'
' not found.').format(name=plugin_name)
else:
message = u._("Secret store plugin not found.")
super(SecretStorePluginNotFound, self).__init__(message)
class SecretStoreSupportedPluginNotFound(exception.BarbicanHTTPException):
"""Raised when no secret store supported plugin is found."""
client_message = u._("Secret store supported plugin not found.")
status_code = 400
def __init__(self, key_spec):
message = u._("Could not find a secret store plugin for storing "
"secret with algorithm '{alg}' and bit-length "
"'{len}'.").format(alg=key_spec.alg,
len=key_spec.bit_length)
super(SecretStoreSupportedPluginNotFound, self).__init__(
message)
class SecretGenerateSupportedPluginNotFound(exception.BarbicanHTTPException):
"""Raised when no secret generate supported plugin is found."""
client_message = u._("Secret generate supported plugin not found.")
status_code = 400
def __init__(self, key_spec):
message = u._("Could not find a secret store plugin for generating "
"secret with algorithm '{alg}' and bit-length "
"'{len}'.").format(alg=key_spec.alg,
len=key_spec.bit_length)
super(SecretGenerateSupportedPluginNotFound, self).__init__(
message)
class SecretContentTypeNotSupportedException(exception.BarbicanHTTPException):
"""Raised when support for payload content type is not available."""
status_code = 400
def __init__(self, content_type):
super(SecretContentTypeNotSupportedException, self).__init__(
u._("A Content-Type of '{content_type}' for secrets is "
"not supported").format(
content_type=content_type)
)
self.content_type = content_type
self.client_message = u._(
"content-type of '{content_type}' not supported").format(
content_type=content_type)
class SecretContentEncodingNotSupportedException(
exception.BarbicanHTTPException):
"""Raised when support for payload content encoding is not available."""
status_code = 400
def __init__(self, content_encoding):
super(SecretContentEncodingNotSupportedException, self).__init__(
u._("Secret Content-Encoding of '{content_encoding}' "
"not supported").format(
content_encoding=content_encoding)
)
self.content_encoding = content_encoding
self.client_message = u._(
"content-encoding of '{content_encoding}' not supported").format(
content_encoding=content_encoding)
class SecretNoPayloadProvidedException(exception.BarbicanException):
"""Raised when secret information is not provided."""
def __init__(self):
super(SecretNoPayloadProvidedException, self).__init__(
u._('No secret information provided to encrypt.')
)
class SecretContentEncodingMustBeBase64(exception.BarbicanHTTPException):
"""Raised when encoding must be base64."""
client_message = u._("Text-based binary secret payloads must "
"specify a content-encoding of 'base64'")
status_code = 400
def __init__(self):
super(SecretContentEncodingMustBeBase64, self).__init__(
u._("Encoding type must be 'base64' for text-based payloads.")
)
class SecretGeneralException(exception.BarbicanException):
"""Raised when a system fault has occurred."""
def __init__(self, reason=u._('Unknown')):
super(SecretGeneralException, self).__init__(
u._('Problem seen during crypto processing - '
'Reason: {reason}').format(reason=reason)
)
self.reason = reason
class SecretPayloadDecodingError(exception.BarbicanHTTPException):
"""Raised when payload could not be decoded."""
client_message = u._("Problem decoding payload")
status_code = 400
def __init__(self):
super(SecretPayloadDecodingError, self).__init__(
u._("Problem decoding payload")
)
class SecretAcceptNotSupportedException(exception.BarbicanHTTPException):
"""Raised when requested decrypted content-type is not available."""
client_message = u._("Wrong payload content-type")
status_code = 406
def __init__(self, accept):
super(SecretAcceptNotSupportedException, self).__init__(
u._("Secret Accept of '{accept}' not supported").format(
accept=accept)
)
self.accept = accept
class SecretNotFoundException(exception.BarbicanHTTPException):
"""Raised when secret information could not be located."""
client_message = u._("Not Found. Sorry but your secret is in another "
"castle")
status_code = 404
def __init__(self):
super(SecretNotFoundException, self).__init__(
u._('No secret information found'))
class SecretAlgorithmNotSupportedException(exception.BarbicanHTTPException):
"""Raised when support for an algorithm is not available."""
client_message = u._("Requested algorithm is not supported")
status_code = 400
def __init__(self, algorithm):
super(SecretAlgorithmNotSupportedException, self).__init__(
u._("Secret algorithm of '{algorithm}' not supported").format(
algorithm=algorithm)
)
self.algorithm = algorithm
class GeneratePassphraseNotSupportedException(exception.BarbicanHTTPException):
"""Raised when generating keys encrypted by passphrase is not supported."""
client_message = (
u._("Generating keys encrypted with passphrases is not supported")
)
status_code = 400
def __init__(self):
super(GeneratePassphraseNotSupportedException, self).__init__(
self.client_message
)
class SecretStorePluginsNotConfigured(exception.BarbicanException):
"""Raised when there are no secret store plugins configured."""
def __init__(self):
super(SecretStorePluginsNotConfigured, self).__init__(
u._('No secret store plugins have been configured')
)
class StorePluginNotAvailableOrMisconfigured(exception.BarbicanException):
"""Raised when a plugin that was previously used can not be found."""
def __init__(self, plugin_name):
super(StorePluginNotAvailableOrMisconfigured, self).__init__(
u._("The requested Store Plugin {plugin_name} is not "
"currently available. This is probably a server "
"misconfiguration.").format(
plugin_name=plugin_name)
)
self.plugin_name = plugin_name
class SecretType(object):
"""Constant to define the symmetric key type.
Used by getSecret to retrieve a symmetric key.
"""
SYMMETRIC = "symmetric"
"""Constant to define the public key type. Used by getSecret to retrieve a
public key.
"""
PUBLIC = "public"
"""Constant to define the private key type. Used by getSecret to retrieve a
private key.
"""
PRIVATE = "private"
"""Constant to define the passphrase type. Used by getSecret to retrieve a
passphrase."""
PASSPHRASE = "passphrase" # nosec
"""Constant to define the certificate type. Used by getSecret to retrieve a
certificate."""
CERTIFICATE = "certificate"
"""Constant to define the opaque date type. Used by getSecret to retrieve
opaque data. Opaque data can be any kind of data. This data type signals to
Barbican to just store the information and do not worry about the format or
encoding. This is the default type if no type is specified by the user."""
OPAQUE = utils.SECRET_TYPE_OPAQUE
class KeyAlgorithm(object):
"""Constant for the Diffie Hellman algorithm."""
DIFFIE_HELLMAN = "diffie_hellman"
"""Constant for the DSA algorithm."""
DSA = "dsa"
"""Constant for the RSA algorithm."""
RSA = "rsa"
"""Constant for the Elliptic Curve algorithm."""
EC = "ec"
"""Constant for the HMACSHA1 algorithm."""
HMACSHA1 = "hmacsha1"
"""Constant for the HMACSHA256 algorithm."""
HMACSHA256 = "hmacsha256"
"""Constant for the HMACSHA384 algorithm."""
HMACSHA384 = "hmacsha384"
"""Constant for the HMACSHA512 algorithm."""
HMACSHA512 = "hmacsha512"
"""List of asymmetric algorithms"""
ASYMMETRIC_ALGORITHMS = [DIFFIE_HELLMAN, DSA, RSA, EC]
"""Constant for the AES algorithm."""
AES = "aes"
"""Constant for the DES algorithm."""
DES = "des"
"""Constant for the DESede (triple-DES) algorithm."""
DESEDE = "desede"
"""List of symmetric algorithms"""
SYMMETRIC_ALGORITHMS = [AES, DES, DESEDE, HMACSHA1,
HMACSHA256, HMACSHA384, HMACSHA512]
class KeySpec(object):
"""This object specifies the algorithm and bit length for a key."""
def __init__(self, alg=None, bit_length=None, mode=None, passphrase=None):
"""Creates a new KeySpec.
        :param alg: algorithm for the key
        :param bit_length: bit length of the key
        :param mode: algorithm mode for the key
        :param passphrase: passphrase for the private key
"""
self.alg = alg
self.bit_length = bit_length
self.mode = mode # TODO(john-wood-w) Paul, is 'mode' required?
self.passphrase = passphrase
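# Editor's addition: a minimal illustrative sketch of a KeySpec describing a
# 256-bit AES key. The 'cbc' mode string is only an assumption for the example;
# callers pass whatever mode their backend expects.
#     spec = KeySpec(alg=KeyAlgorithm.AES, bit_length=256, mode='cbc')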
class SecretDTO(object):
"""This object is a secret data transfer object (DTO).
This object encapsulates a key and attributes about the key. The attributes
include a KeySpec that contains the algorithm and bit length. The
attributes also include information on the encoding of the key.
"""
# TODO(john-wood-w) Remove 'content_type' once secret normalization work is
# completed.
def __init__(self, type, secret, key_spec, content_type,
transport_key=None):
"""Creates a new SecretDTO.
The secret is stored in the secret parameter. In the future this
DTO may include compression and key wrapping information.
:param type: SecretType for secret
:param secret: secret, as a base64-encoded string
:param key_spec: KeySpec key specifications
:param content_type: Content type of the secret, one of MIME
types such as 'text/plain' or 'application/octet-stream'
:param transport_key: presence of this parameter indicates that the
secret has been encrypted using a transport key. The transport
key is a base64 encoded x509 transport certificate.
"""
self.type = type or SecretType.OPAQUE
self.secret = secret
self.key_spec = key_spec
self.content_type = content_type
self.transport_key = transport_key
class AsymmetricKeyMetadataDTO(object):
"""This DTO encapsulates metadata(s) for asymmetric key components.
These components are private_key_meta, public_key_meta and passphrase_meta.
"""
def __init__(self, private_key_meta=None,
public_key_meta=None,
passphrase_meta=None):
"""Constructor for AsymmetricKeyMetadataDTO
:param private_key_meta: private key metadata
:param public_key_meta: public key metadata
:param passphrase_meta: passphrase key metadata
"""
self.private_key_meta = private_key_meta
self.public_key_meta = public_key_meta
self.passphrase_meta = passphrase_meta
@six.add_metaclass(abc.ABCMeta)
class SecretStoreBase(object):
@abc.abstractmethod
def get_plugin_name(self):
"""Gets user friendly plugin name.
This plugin name is expected to be read from config file.
There will be a default defined for plugin name which can be customized
in specific deployment if needed.
This name needs to be unique across a deployment.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def generate_symmetric_key(self, key_spec):
"""Generate a new symmetric key and store it.
Generates a new symmetric key and stores it in the secret store.
A dictionary is returned that contains metadata about the newly created
symmetric key. The dictionary of metadata is stored by Barbican and
passed into other methods to aid the plugins. This can be useful for
plugins that generate a unique ID in the external data store and use it
to retrieve the key in the future. The returned dictionary may be empty
if the SecretStore does not require it.
:param key_spec: KeySpec that contains details on the type of key to
generate
:returns: an optional dictionary containing metadata about the key
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def generate_asymmetric_key(self, key_spec):
"""Generate a new asymmetric key pair and store it.
Generates a new asymmetric key pair and stores it in the secret
store. An object of type AsymmetricKeyMetadataDTO will be returned
containing attributes of metadata for newly created key pairs.
The metadata is stored by Barbican and passed into other methods
to aid the plugins. This can be useful for plugins that generate
a unique ID in the external data store and use it to retrieve the
key pairs in the future.
:param key_spec: KeySpec that contains details on the type of key to
generate
:returns: An object of type AsymmetricKeyMetadataDTO containing
metadata about the key pair.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def store_secret(self, secret_dto):
"""Stores a key.
The SecretDTO contains the bytes of the secret and properties of the
secret. The SecretStore retrieves the secret bytes, stores them, and
returns a dictionary of metadata about the secret. This can be
useful for plugins that generate a unique ID in the external data
store and use it to retrieve the secret in the future. The returned
dictionary may be empty if the SecretStore does not require it.
:param secret_dto: SecretDTO for secret
:returns: an optional dictionary containing metadata about the secret
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def get_secret(self, secret_type, secret_metadata):
"""Retrieves a secret from the secret store.
Retrieves a secret from the secret store and returns a SecretDTO that
contains the secret.
The secret_metadata parameter is the metadata returned from one of the
generate or store methods. This data is used by the plugins to retrieve
the key.
The secret_type parameter may be useful for secret stores to know the
expected format of the secret. For instance if the type is
        SecretType.PRIVATE then a PKCS8 structure is returned. This way secret
stores do not need to manage the secret type on their own.
:param secret_type: secret type
:param secret_metadata: secret metadata
:returns: SecretDTO that contains secret
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def generate_supports(self, key_spec):
"""Returns a boolean indicating if the secret type is supported.
This checks if the algorithm and bit length are supported by the
generate methods. This is useful to call before calling
        generate_symmetric_key or generate_asymmetric_key to see if the key type
is supported before trying to generate it.
:param key_spec: KeySpec that contains details on the algorithm and bit
length
:returns: boolean indicating if the algorithm is supported
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def delete_secret(self, secret_metadata):
"""Deletes a secret from the secret store.
Deletes a secret from a secret store. It can no longer be referenced
after this call.
:param secret_metadata: secret_metadata
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def store_secret_supports(self, key_spec):
"""Returns a boolean indicating if the secret can be stored.
        Checks if the secret store can store the secret, given the attributes
of the secret in the KeySpec. For example, some plugins may need to
know the attributes in order to store the secret, but other plugins
may be able to store the secret as a blob if no attributes are given.
:param key_spec: KeySpec for the secret
:returns: a boolean indicating if the secret can be stored
"""
raise NotImplementedError # pragma: no cover
def get_transport_key(self):
"""Gets a transport key.
Returns the current valid transport key associated with this plugin.
The transport key is expected to be a base64 encoded x509 certificate
containing a public key. Admins are responsible for deleting old keys
from the database using the DELETE method on the TransportKey resource.
By default, returns None. Plugins that support transport key
wrapping should override this method.
"""
return None
def is_transport_key_current(self, transport_key):
"""Determines if the provided transport key is the current valid key
Returns true if the transport key is the current valid transport key.
If the key is not valid, then barbican core will request a new
transport key from the plugin.
Returns False by default. Plugins that support transport key wrapping
should override this method.
"""
return False
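# Editor's addition: an illustrative sketch (not a real backend) of the shape a
# concrete plugin takes. A real plugin implements every abstract method above
# and is registered under the stevedore namespace from the 'secretstore' config;
# the helper calls shown here are hypothetical.
#     class InMemorySecretStore(SecretStoreBase):
#         def get_plugin_name(self):
#             return 'in_memory'
#         def store_secret(self, secret_dto):
#             secret_id = self._save(secret_dto.secret)        # hypothetical helper
#             return {'secret_id': secret_id}
#         def get_secret(self, secret_type, secret_metadata):
#             return self._load(secret_metadata['secret_id'])  # hypothetical helper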
def _enforce_extensions_configured(plugin_related_function):
def _check_plugins_configured(self, *args, **kwargs):
if not self.extensions:
raise SecretStorePluginsNotConfigured()
return plugin_related_function(self, *args, **kwargs)
return _check_plugins_configured
class SecretStorePluginManager(named.NamedExtensionManager):
def __init__(self, conf=CONF, invoke_args=(), invoke_kwargs={}):
ss_conf = config.get_module_config('secretstore')
plugin_names = self._get_internal_plugin_names(ss_conf)
super(SecretStorePluginManager, self).__init__(
ss_conf.secretstore.namespace,
plugin_names,
invoke_on_load=False, # Defer creating plugins to utility below.
invoke_args=invoke_args,
invoke_kwds=invoke_kwargs,
name_order=True # extensions sorted as per order of plugin names
)
plugin_utils.instantiate_plugins(self, invoke_args, invoke_kwargs)
multiple_backends.sync_secret_stores(self)
@_enforce_extensions_configured
def get_plugin_store(self, key_spec, plugin_name=None,
transport_key_needed=False, project_id=None):
"""Gets a secret store plugin.
        :param plugin_name: set to plugin_name to get a specific plugin
        :param key_spec: KeySpec of the key that will be stored
        :param transport_key_needed: set to True if a transport
            key is required.
:returns: SecretStoreBase plugin implementation
"""
active_plugins = multiple_backends.get_applicable_store_plugins(
self, project_id=project_id, existing_plugin_name=plugin_name)
if plugin_name is not None:
for plugin in active_plugins:
if utils.generate_fullname_for(plugin) == plugin_name:
return plugin
raise SecretStorePluginNotFound(plugin_name)
if not transport_key_needed:
for plugin in active_plugins:
if plugin.store_secret_supports(key_spec):
return plugin
else:
for plugin in active_plugins:
if (plugin.get_transport_key() is not None and
plugin.store_secret_supports(key_spec)):
return plugin
raise SecretStoreSupportedPluginNotFound(key_spec)
@_enforce_extensions_configured
def get_plugin_retrieve_delete(self, plugin_name):
"""Gets a secret retrieve/delete plugin.
If this function is being called, it is because we are trying to
retrieve or delete an already stored secret. Thus, the plugin name is
actually gotten from the plugin metadata that has already been stored
in the database. So, in this case, if this plugin is not available,
this might be due to a server misconfiguration.
:returns: SecretStoreBase plugin implementation
:raises: StorePluginNotAvailableOrMisconfigured: If the plugin wasn't
found it's because the plugin parameters were not properly
configured on the database side.
"""
for plugin in plugin_utils.get_active_plugins(self):
if utils.generate_fullname_for(plugin) == plugin_name:
return plugin
raise StorePluginNotAvailableOrMisconfigured(plugin_name)
@_enforce_extensions_configured
def get_plugin_generate(self, key_spec, project_id=None):
"""Gets a secret generate plugin.
:param key_spec: KeySpec that contains details on the type of key to
generate
:returns: SecretStoreBase plugin implementation
"""
active_plugins = multiple_backends.get_applicable_store_plugins(
self, project_id=project_id, existing_plugin_name=None)
for plugin in active_plugins:
if plugin.generate_supports(key_spec):
return plugin
raise SecretGenerateSupportedPluginNotFound(key_spec)
def _get_internal_plugin_names(self, secretstore_conf):
"""Gets plugin names used for loading via stevedore.
When multiple secret store support is enabled, then secret store plugin
names are read via updated configuration structure. If not enabled,
then it reads MultiStr property in 'secretstore' config section.
"""
# to cache default global secret store value on first use
self.global_default_store_dict = None
if utils.is_multiple_backends_enabled():
self.parsed_stores = multiple_backends.\
read_multiple_backends_config()
plugin_names = [store.store_plugin for store in self.parsed_stores
if store.store_plugin]
else:
plugin_names = secretstore_conf.secretstore.\
enabled_secretstore_plugins
return plugin_names
def get_manager():
global _SECRET_STORE
if not _SECRET_STORE:
_SECRET_STORE = SecretStorePluginManager()
return _SECRET_STORE
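# Editor's addition: an illustrative usage sketch. It resolves a plugin capable
# of storing a 256-bit AES secret; passing project_id=None is an assumption for
# the example.
#     manager = get_manager()
#     plugin = manager.get_plugin_store(
#         KeySpec(alg=KeyAlgorithm.AES, bit_length=256), project_id=None)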
| 38.482861
| 79
| 0.676594
|
import abc
from oslo_config import cfg
import six
from stevedore import named
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
from barbican.plugin.util import multiple_backends
from barbican.plugin.util import utils as plugin_utils
_SECRET_STORE = None
CONF = config.new_config()
DEFAULT_PLUGIN_NAMESPACE = 'barbican.secretstore.plugin'
DEFAULT_PLUGINS = ['store_crypto']
store_opt_group = cfg.OptGroup(name='secretstore',
title='Secret Store Plugin Options')
store_opts = [
cfg.StrOpt('namespace',
default=DEFAULT_PLUGIN_NAMESPACE,
help=u._('Extension namespace to search for plugins.')
),
cfg.MultiStrOpt('enabled_secretstore_plugins',
default=DEFAULT_PLUGINS,
help=u._('List of secret store plugins to load.')
),
cfg.BoolOpt('enable_multiple_secret_stores',
default=False,
help=u._('Flag to enable multiple secret store plugin'
' backend support. Default is False')
),
cfg.ListOpt('stores_lookup_suffix',
help=u._('List of suffix to use for looking up plugins which '
'are supported with multiple backend support.')
)
]
CONF.register_group(store_opt_group)
CONF.register_opts(store_opts, group=store_opt_group)
config.parse_args(CONF)
config.set_module_config("secretstore", CONF)
def list_opts():
yield store_opt_group, store_opts
class SecretStorePluginNotFound(exception.BarbicanHTTPException):
client_message = u._("No plugin was found that could support your request")
status_code = 400
def __init__(self, plugin_name=None):
if plugin_name:
message = u._('Secret store plugin "{name}"'
' not found.').format(name=plugin_name)
else:
message = u._("Secret store plugin not found.")
super(SecretStorePluginNotFound, self).__init__(message)
class SecretStoreSupportedPluginNotFound(exception.BarbicanHTTPException):
client_message = u._("Secret store supported plugin not found.")
status_code = 400
def __init__(self, key_spec):
message = u._("Could not find a secret store plugin for storing "
"secret with algorithm '{alg}' and bit-length "
"'{len}'.").format(alg=key_spec.alg,
len=key_spec.bit_length)
super(SecretStoreSupportedPluginNotFound, self).__init__(
message)
class SecretGenerateSupportedPluginNotFound(exception.BarbicanHTTPException):
client_message = u._("Secret generate supported plugin not found.")
status_code = 400
def __init__(self, key_spec):
message = u._("Could not find a secret store plugin for generating "
"secret with algorithm '{alg}' and bit-length "
"'{len}'.").format(alg=key_spec.alg,
len=key_spec.bit_length)
super(SecretGenerateSupportedPluginNotFound, self).__init__(
message)
class SecretContentTypeNotSupportedException(exception.BarbicanHTTPException):
status_code = 400
def __init__(self, content_type):
super(SecretContentTypeNotSupportedException, self).__init__(
u._("A Content-Type of '{content_type}' for secrets is "
"not supported").format(
content_type=content_type)
)
self.content_type = content_type
self.client_message = u._(
"content-type of '{content_type}' not supported").format(
content_type=content_type)
class SecretContentEncodingNotSupportedException(
exception.BarbicanHTTPException):
status_code = 400
def __init__(self, content_encoding):
super(SecretContentEncodingNotSupportedException, self).__init__(
u._("Secret Content-Encoding of '{content_encoding}' "
"not supported").format(
content_encoding=content_encoding)
)
self.content_encoding = content_encoding
self.client_message = u._(
"content-encoding of '{content_encoding}' not supported").format(
content_encoding=content_encoding)
class SecretNoPayloadProvidedException(exception.BarbicanException):
def __init__(self):
super(SecretNoPayloadProvidedException, self).__init__(
u._('No secret information provided to encrypt.')
)
class SecretContentEncodingMustBeBase64(exception.BarbicanHTTPException):
client_message = u._("Text-based binary secret payloads must "
"specify a content-encoding of 'base64'")
status_code = 400
def __init__(self):
super(SecretContentEncodingMustBeBase64, self).__init__(
u._("Encoding type must be 'base64' for text-based payloads.")
)
class SecretGeneralException(exception.BarbicanException):
def __init__(self, reason=u._('Unknown')):
super(SecretGeneralException, self).__init__(
u._('Problem seen during crypto processing - '
'Reason: {reason}').format(reason=reason)
)
self.reason = reason
class SecretPayloadDecodingError(exception.BarbicanHTTPException):
client_message = u._("Problem decoding payload")
status_code = 400
def __init__(self):
super(SecretPayloadDecodingError, self).__init__(
u._("Problem decoding payload")
)
class SecretAcceptNotSupportedException(exception.BarbicanHTTPException):
client_message = u._("Wrong payload content-type")
status_code = 406
def __init__(self, accept):
super(SecretAcceptNotSupportedException, self).__init__(
u._("Secret Accept of '{accept}' not supported").format(
accept=accept)
)
self.accept = accept
class SecretNotFoundException(exception.BarbicanHTTPException):
client_message = u._("Not Found. Sorry but your secret is in another "
"castle")
status_code = 404
def __init__(self):
super(SecretNotFoundException, self).__init__(
u._('No secret information found'))
class SecretAlgorithmNotSupportedException(exception.BarbicanHTTPException):
client_message = u._("Requested algorithm is not supported")
status_code = 400
def __init__(self, algorithm):
super(SecretAlgorithmNotSupportedException, self).__init__(
u._("Secret algorithm of '{algorithm}' not supported").format(
algorithm=algorithm)
)
self.algorithm = algorithm
class GeneratePassphraseNotSupportedException(exception.BarbicanHTTPException):
client_message = (
u._("Generating keys encrypted with passphrases is not supported")
)
status_code = 400
def __init__(self):
super(GeneratePassphraseNotSupportedException, self).__init__(
self.client_message
)
class SecretStorePluginsNotConfigured(exception.BarbicanException):
def __init__(self):
super(SecretStorePluginsNotConfigured, self).__init__(
u._('No secret store plugins have been configured')
)
class StorePluginNotAvailableOrMisconfigured(exception.BarbicanException):
def __init__(self, plugin_name):
super(StorePluginNotAvailableOrMisconfigured, self).__init__(
u._("The requested Store Plugin {plugin_name} is not "
"currently available. This is probably a server "
"misconfiguration.").format(
plugin_name=plugin_name)
)
self.plugin_name = plugin_name
class SecretType(object):
SYMMETRIC = "symmetric"
PUBLIC = "public"
PRIVATE = "private"
PASSPHRASE = "passphrase"
CERTIFICATE = "certificate"
OPAQUE = utils.SECRET_TYPE_OPAQUE
class KeyAlgorithm(object):
DIFFIE_HELLMAN = "diffie_hellman"
DSA = "dsa"
RSA = "rsa"
EC = "ec"
HMACSHA1 = "hmacsha1"
HMACSHA256 = "hmacsha256"
HMACSHA384 = "hmacsha384"
HMACSHA512 = "hmacsha512"
ASYMMETRIC_ALGORITHMS = [DIFFIE_HELLMAN, DSA, RSA, EC]
AES = "aes"
DES = "des"
DESEDE = "desede"
SYMMETRIC_ALGORITHMS = [AES, DES, DESEDE, HMACSHA1,
HMACSHA256, HMACSHA384, HMACSHA512]
class KeySpec(object):
def __init__(self, alg=None, bit_length=None, mode=None, passphrase=None):
self.alg = alg
self.bit_length = bit_length
self.mode = mode
self.passphrase = passphrase
class SecretDTO(object):
def __init__(self, type, secret, key_spec, content_type,
transport_key=None):
self.type = type or SecretType.OPAQUE
self.secret = secret
self.key_spec = key_spec
self.content_type = content_type
self.transport_key = transport_key
class AsymmetricKeyMetadataDTO(object):
def __init__(self, private_key_meta=None,
public_key_meta=None,
passphrase_meta=None):
self.private_key_meta = private_key_meta
self.public_key_meta = public_key_meta
self.passphrase_meta = passphrase_meta
@six.add_metaclass(abc.ABCMeta)
class SecretStoreBase(object):
@abc.abstractmethod
def get_plugin_name(self):
raise NotImplementedError
@abc.abstractmethod
def generate_symmetric_key(self, key_spec):
raise NotImplementedError
@abc.abstractmethod
def generate_asymmetric_key(self, key_spec):
raise NotImplementedError
@abc.abstractmethod
def store_secret(self, secret_dto):
raise NotImplementedError
@abc.abstractmethod
def get_secret(self, secret_type, secret_metadata):
raise NotImplementedError
@abc.abstractmethod
def generate_supports(self, key_spec):
raise NotImplementedError
@abc.abstractmethod
def delete_secret(self, secret_metadata):
raise NotImplementedError
@abc.abstractmethod
def store_secret_supports(self, key_spec):
raise NotImplementedError
def get_transport_key(self):
return None
def is_transport_key_current(self, transport_key):
return False
def _enforce_extensions_configured(plugin_related_function):
def _check_plugins_configured(self, *args, **kwargs):
if not self.extensions:
raise SecretStorePluginsNotConfigured()
return plugin_related_function(self, *args, **kwargs)
return _check_plugins_configured
class SecretStorePluginManager(named.NamedExtensionManager):
def __init__(self, conf=CONF, invoke_args=(), invoke_kwargs={}):
ss_conf = config.get_module_config('secretstore')
plugin_names = self._get_internal_plugin_names(ss_conf)
super(SecretStorePluginManager, self).__init__(
ss_conf.secretstore.namespace,
plugin_names,
invoke_on_load=False,
invoke_args=invoke_args,
invoke_kwds=invoke_kwargs,
name_order=True
)
plugin_utils.instantiate_plugins(self, invoke_args, invoke_kwargs)
multiple_backends.sync_secret_stores(self)
@_enforce_extensions_configured
def get_plugin_store(self, key_spec, plugin_name=None,
transport_key_needed=False, project_id=None):
active_plugins = multiple_backends.get_applicable_store_plugins(
self, project_id=project_id, existing_plugin_name=plugin_name)
if plugin_name is not None:
for plugin in active_plugins:
if utils.generate_fullname_for(plugin) == plugin_name:
return plugin
raise SecretStorePluginNotFound(plugin_name)
if not transport_key_needed:
for plugin in active_plugins:
if plugin.store_secret_supports(key_spec):
return plugin
else:
for plugin in active_plugins:
if (plugin.get_transport_key() is not None and
plugin.store_secret_supports(key_spec)):
return plugin
raise SecretStoreSupportedPluginNotFound(key_spec)
@_enforce_extensions_configured
def get_plugin_retrieve_delete(self, plugin_name):
for plugin in plugin_utils.get_active_plugins(self):
if utils.generate_fullname_for(plugin) == plugin_name:
return plugin
raise StorePluginNotAvailableOrMisconfigured(plugin_name)
@_enforce_extensions_configured
def get_plugin_generate(self, key_spec, project_id=None):
active_plugins = multiple_backends.get_applicable_store_plugins(
self, project_id=project_id, existing_plugin_name=None)
for plugin in active_plugins:
if plugin.generate_supports(key_spec):
return plugin
raise SecretGenerateSupportedPluginNotFound(key_spec)
def _get_internal_plugin_names(self, secretstore_conf):
self.global_default_store_dict = None
if utils.is_multiple_backends_enabled():
self.parsed_stores = multiple_backends.\
read_multiple_backends_config()
plugin_names = [store.store_plugin for store in self.parsed_stores
if store.store_plugin]
else:
plugin_names = secretstore_conf.secretstore.\
enabled_secretstore_plugins
return plugin_names
def get_manager():
global _SECRET_STORE
if not _SECRET_STORE:
_SECRET_STORE = SecretStorePluginManager()
return _SECRET_STORE
| true
| true
|
f70576272db6e715ab91b4b6cd21f28a6e4e23c0
| 16,545
|
py
|
Python
|
spirl/rl/components/agent.py
|
kouroshHakha/fist
|
328c098789239fd892e17edefd799fc1957ab637
|
[
"BSD-3-Clause"
] | 8
|
2021-10-14T03:14:23.000Z
|
2022-03-15T21:31:17.000Z
|
spirl/rl/components/agent.py
|
kouroshHakha/fist
|
328c098789239fd892e17edefd799fc1957ab637
|
[
"BSD-3-Clause"
] | null | null | null |
spirl/rl/components/agent.py
|
kouroshHakha/fist
|
328c098789239fd892e17edefd799fc1957ab637
|
[
"BSD-3-Clause"
] | 1
|
2021-09-13T20:42:28.000Z
|
2021-09-13T20:42:28.000Z
|
import os
import torch
import torch.nn as nn
import numpy as np
from contextlib import contextmanager
from functools import partial
from torch.optim import Adam, SGD
from spirl.utils.general_utils import ParamDict, get_clipped_optimizer, AttrDict, prefix_dict, map_dict, \
nan_hook, np2obj, ConstantSchedule
from spirl.utils.pytorch_utils import RAdam, remove_grads, map2np, map2torch
from spirl.utils.vis_utils import add_caption_to_img, add_captions_to_seq
from spirl.rl.components.normalization import DummyNormalizer
from spirl.rl.components.policy import Policy
from spirl.components.checkpointer import CheckpointHandler
from spirl.rl.utils.mpi import sync_grads
class BaseAgent(nn.Module):
def __init__(self, config):
super().__init__()
self._hp = self._default_hparams().overwrite(config)
self.device = self._hp.device
self._is_train = True # indicates whether agent should sample in training mode
self._rand_act_mode = False # indicates whether agent should act randomly (for warmup collection)
self._rollout_mode = False # indicates whether agent is run in rollout mode (omit certain policy outputs)
self._obs_normalizer = self._hp.obs_normalizer(self._hp.obs_normalizer_params)
def _default_hparams(self):
default_dict = ParamDict({
'device': None, # pytorch device
'discount_factor': 0.99, # discount factor for RL update
'optimizer': 'adam', # supported: 'adam', 'radam', 'rmsprop', 'sgd'
'gradient_clip': None, # max grad norm, if None no clipping
'momentum': 0, # momentum in RMSProp / SGD optimizer
'adam_beta': 0.9, # beta1 param in Adam
'update_iterations': 1, # number of iteration steps per one call to 'update(...)'
'target_network_update_factor': 5e-3, # percentage of new weights that are carried over
'batch_size': 64, # size of the experience batch used for updates
'obs_normalizer': DummyNormalizer, # observation normalization class
            'obs_normalizer_params': {},          # parameters for the observation normalizer class
'obs_norm_log_groups': {}, # (optional) dict defining separation of state space for obsNormLog
'log_videos': True, # whether to log videos during logging
'log_video_caption': False, # whether to add captions to video
'num_workers': None, # number of independent workers --> whether grads need sync
})
return default_dict
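    # Editor's note (added): a concrete agent config is usually built by
    # overwriting a subset of these defaults, e.g.
    #     ParamDict({'device': 'cuda', 'batch_size': 256, 'gradient_clip': 1.0})
    # passed to the constructor as 'config'; every key shown exists above.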
def act(self, obs):
"""Returns policy output dict given observation (random action if self._rand_act_mode is set)."""
if self._rand_act_mode:
return self._act_rand(obs)
else:
return self._act(obs)
def _act(self, obs):
"""Implements act method in child class."""
raise NotImplementedError
def _act_rand(self, obs):
"""Returns random action with proper dimension. Implemented in child class."""
raise NotImplementedError
def update(self, experience_batch):
"""Updates the policy given a batch of experience."""
raise NotImplementedError
def add_experience(self, experience_batch):
"""Provides interface for adding additional experience to agent replay, needs to be overwritten by child."""
print("### This agent does not support additional experience! ###")
def log_outputs(self, logging_stats, rollout_storage, logger, log_images, step):
"""Visualizes/logs all training outputs."""
logger.log_scalar_dict(logging_stats, prefix='train' if self._is_train else 'val', step=step)
if log_images:
assert rollout_storage is not None # need rollout data for image logging
# log rollout videos with info captions
if 'image' in rollout_storage and self._hp.log_videos:
if self._hp.log_video_caption:
vids = [np.stack(add_captions_to_seq(rollout.image, np2obj(rollout.info))).transpose(0, 3, 1, 2)
for rollout in rollout_storage.get()[-logger.n_logged_samples:]]
else:
vids = [np.stack(rollout.image).transpose(0, 3, 1, 2)
for rollout in rollout_storage.get()[-logger.n_logged_samples:]]
logger.log_videos(vids, name="rollouts", step=step)
self.visualize(logger, rollout_storage, step)
def visualize(self, logger, rollout_storage, step):
"""Optionally allows to further visualize the internal state of agent (e.g. replay buffer etc.)"""
pass
def reset(self):
"""Can be used for any initializations of agent's state at beginning of episode."""
pass
def save_state(self, save_dir):
"""Provides interface to save any internal state variables (like replay buffers) to disk."""
pass
def load_state(self, save_dir):
"""Provides interface to load any internal state variables (like replay buffers) from disk."""
pass
def sync_networks(self):
"""Syncs network parameters across workers."""
raise NotImplementedError
def _soft_update_target_network(self, target, source):
"""Copies weights from source to target with weight [0,1]."""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(self._hp.target_network_update_factor * param.data +
(1 - self._hp.target_network_update_factor) * target_param.data)
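    # Editor's note (added): the update above is the standard Polyak / exponential
    # moving average step, target <- tau * source + (1 - tau) * target, with
    # tau = target_network_update_factor (5e-3 by default).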
def _copy_to_target_network(self, target, source):
"""Completely copies weights from source to target."""
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(source_param.data)
def _get_optimizer(self, optimizer, model, lr):
"""Returns an instance of the specified optimizers on the parameters of the model with specified learning rate."""
if optimizer == 'adam':
get_optim = partial(get_clipped_optimizer, optimizer_type=Adam, betas=(self._hp.adam_beta, 0.999))
elif optimizer == 'radam':
get_optim = partial(get_clipped_optimizer, optimizer_type=RAdam, betas=(self._hp.adam_beta, 0.999))
elif optimizer == 'sgd':
get_optim = partial(get_clipped_optimizer, optimizer_type=SGD, momentum=self._hp.momentum)
else:
raise ValueError("Optimizer '{}' not supported!".format(optimizer))
optim = partial(get_optim, gradient_clip=self._hp.gradient_clip)
return optim(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
def _perform_update(self, loss, opt, network):
"""Performs one backward gradient step on the loss using the given optimizer. Also syncs gradients."""
nan_hook(loss)
opt.zero_grad()
loss.backward()
grads = [p.grad for p in network.parameters()]
nan_hook(grads)
opt.step()
def _get_obs_norm_info(self):
if isinstance(self._obs_normalizer, DummyNormalizer): return {}
mean, std = self._obs_normalizer.mean, self._obs_normalizer.std
if not self._hp.obs_norm_log_groups:
self._hp.obs_norm_log_groups = AttrDict(all=np.arange(mean.shape[0]))
info = {}
for group_key in self._hp.obs_norm_log_groups:
info['obs_norm_' + group_key + '_mean'] = mean[self._hp.obs_norm_log_groups[group_key]].mean()
info['obs_norm_' + group_key + '_std'] = std[self._hp.obs_norm_log_groups[group_key]].mean()
return info
@staticmethod
def load_model_weights(model, checkpoint, epoch='latest'):
"""Loads weights for a given model from the given checkpoint directory."""
checkpoint_dir = checkpoint if os.path.basename(checkpoint) == 'weights' \
else os.path.join(checkpoint, 'weights') # checkpts in 'weights' dir
checkpoint_path = CheckpointHandler.get_resume_ckpt_file(epoch, checkpoint_dir)
CheckpointHandler.load_weights(checkpoint_path, model=model)
@staticmethod
def _remove_batch(d):
"""Adds batch dimension to all tensors in d."""
return map_dict(lambda x: x[0] if (isinstance(x, torch.Tensor) or
isinstance(x, np.ndarray)) else x, d)
@contextmanager
def val_mode(self):
"""Sets validation parameters if desired. To be used like: with agent.val_mode(): ...<do something>..."""
self._is_train = False
self.call_children("switch_to_val", Policy)
yield
self._is_train = True
self.call_children("switch_to_train", Policy)
@contextmanager
def rand_act_mode(self):
"""Performs random actions within context. To be used like: with agent.rand_act_mode(): ...<do something>..."""
self._rand_act_mode = True
yield
self._rand_act_mode = False
@contextmanager
def rollout_mode(self):
"""Sets rollout parameters if desired."""
self._rollout_mode = True
self.call_children("switch_to_rollout", Policy)
yield
self._rollout_mode = False
self.call_children("switch_to_non_rollout", Policy)
def call_children(self, fn, cls):
"""Call function with name fn in all submodules of class cls."""
def conditional_fn(module):
if isinstance(module, cls):
getattr(module, fn).__call__()
self.apply(conditional_fn)
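
# --- Illustrative sketch (added by the editor, not part of the original file) ---
# Shows how the target-network helpers above are typically combined: hard-copy the
# online network into the target once, then soft-update it after every gradient
# step. `agent`, `critic` and `critic_target` are hypothetical names used only for
# illustration; any pair of identically shaped nn.Modules works.
def _example_target_network_usage(agent, critic, critic_target):
    agent._copy_to_target_network(critic_target, critic)        # one-time hard copy
    agent._soft_update_target_network(critic_target, critic)    # per-step Polyak update
# ---------------------------------------------------------------------------------
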
class HierarchicalAgent(BaseAgent):
"""Implements a basic hierarchical agent with high-level and low-level policy/policies."""
def __init__(self, config):
super().__init__(config)
self.hl_agent = self._hp.hl_agent(self._hp.overwrite(self._hp.hl_agent_params))
self.ll_agent = self._hp.ll_agent(self._hp.overwrite(self._hp.ll_agent_params))
self._last_hl_output = None # stores last high-level output to feed to low-level during intermediate steps
def _default_hparams(self):
default_dict = ParamDict({
'hl_agent': None, # high-level agent class
'hl_agent_params': None, # parameters of the high-level agent
'll_agent': None, # low-level agent class
'll_agent_params': None, # parameters of the low-level agent(s)
'update_hl': True, # whether to update high-level agent
'update_ll': True, # whether to update low-level agent(s)
'll_subgoal_reaching_reward': False, # whether to count ll subgoal reaching reward in training
'll_subgoal_reaching_reward_weight': 1e3, # weight for the subgoal reaching reward
})
return super()._default_hparams().overwrite(default_dict)
def act(self, obs):
"""Output dict contains is_hl_step in case high-level action was performed during this action."""
obs_input = obs[None] if len(obs.shape) == 1 else obs # need batch input for agents
output = AttrDict()
if self._perform_hl_step_now:
# perform step with high-level policy
self._last_hl_output = self.hl_agent.act(obs_input)
output.is_hl_step = True
if len(obs_input.shape) == 2 and len(self._last_hl_output.action.shape) == 1:
self._last_hl_output.action = self._last_hl_output.action[None] # add batch dim if necessary
self._last_hl_output.log_prob = self._last_hl_output.log_prob[None]
else:
output.is_hl_step = False
output.update(prefix_dict(self._last_hl_output, 'hl_'))
# perform step with low-level policy
assert self._last_hl_output is not None
output.update(self.ll_agent.act(self.make_ll_obs(obs_input, self._last_hl_output.action)))
return self._remove_batch(output) if len(obs.shape) == 1 else output
def update(self, experience_batches):
"""Updates high-level and low-level agents depending on which parameters are set."""
assert isinstance(experience_batches, AttrDict) # update requires batches for both HL and LL
update_outputs = AttrDict()
if self._hp.update_hl:
hl_update_outputs = self.hl_agent.update(experience_batches.hl_batch)
update_outputs.update(prefix_dict(hl_update_outputs, "hl_"))
if self._hp.update_ll:
ll_update_outputs = self.ll_agent.update(experience_batches.ll_batch)
update_outputs.update(ll_update_outputs)
return update_outputs
def log_outputs(self, logging_stats, rollout_storage, logger, log_images, step):
"""Additionally provides option ot visualize hierarchical agents."""
super().log_outputs(logging_stats, rollout_storage, logger, log_images, step)
if log_images:
self.hl_agent.visualize(logger, rollout_storage, step)
self.ll_agent.visualize(logger, rollout_storage, step)
def _act_rand(self, obs):
"""Performs random actions with high-level policy. Low-level policy operates normally."""
with self.hl_agent.rand_act_mode():
return self.act(obs)
def make_ll_obs(self, obs, hl_action):
"""Creates low-level agent's observation from env observation and HL action."""
return np.concatenate((obs, hl_action), axis=-1)
def add_experience(self, experience_batch):
self.hl_agent.add_experience(experience_batch.hl_batch)
self.ll_agent.add_experience(experience_batch.ll_batch)
def sync_networks(self):
self.hl_agent.sync_networks()
self.ll_agent.sync_networks()
def state_dict(self, *args, **kwargs):
return {'hl_agent': self.hl_agent.state_dict(*args, **kwargs),
'll_agent': self.ll_agent.state_dict(*args, **kwargs)}
def load_state_dict(self, state_dict, *args, **kwargs):
self.hl_agent.load_state_dict(state_dict.pop('hl_agent'), *args, **kwargs)
self.ll_agent.load_state_dict(state_dict.pop('ll_agent'), *args, **kwargs)
def save_state(self, save_dir):
self.hl_agent.save_state(os.path.join(save_dir, 'hl_agent'))
self.ll_agent.save_state(os.path.join(save_dir, 'll_agent'))
def load_state(self, save_dir):
self.hl_agent.load_state(os.path.join(save_dir, 'hl_agent'))
self.ll_agent.load_state(os.path.join(save_dir, 'll_agent'))
def reset(self):
super().reset()
self.hl_agent.reset()
self.ll_agent.reset()
@contextmanager
def rand_act_mode(self):
"""Performs random actions within context. To be used like: with agent.rand_act_mode(): ...<do something>..."""
self._rand_act_mode = True
self.hl_agent._rand_act_mode = True
self.ll_agent._rand_act_mode = True
yield
self._rand_act_mode = False
self.hl_agent._rand_act_mode = False
self.ll_agent._rand_act_mode = False
@property
def _perform_hl_step_now(self):
"""Indicates whether the high-level policy should be executed in the current time step."""
raise NotImplementedError # should be implemented by child class!
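
# --- Illustrative sketch (added by the editor, not part of the original file) ---
# HierarchicalAgent.update expects a single AttrDict carrying separate batches for
# the high-level and low-level agents. What the batches contain depends on the
# concrete sub-agents; `hl_batch` and `ll_batch` below are placeholders.
def _example_hierarchical_update(agent, hl_batch, ll_batch):
    experience_batches = AttrDict(hl_batch=hl_batch, ll_batch=ll_batch)
    return agent.update(experience_batches)
# ---------------------------------------------------------------------------------
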
class FixedIntervalHierarchicalAgent(HierarchicalAgent):
"""Hierarchical agent that executes high-level actions in fixed temporal intervals."""
def __init__(self, config):
super().__init__(config)
self._steps_since_hl = 0 # number of steps since last high-level step
def _default_hparams(self):
default_dict = ParamDict({
'hl_interval': 3, # temporal interval at which high-level actions are executed
})
return super()._default_hparams().overwrite(default_dict)
def act(self, *args, **kwargs):
output = super().act(*args, **kwargs)
self._steps_since_hl += 1
return output
@property
def _perform_hl_step_now(self):
return self._steps_since_hl % self._hp.hl_interval == 0
def reset(self):
super().reset()
self._steps_since_hl = 0 # start new episode with high-level step
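
# --- Illustrative sketch (added by the editor, not part of the original file) ---
# With hl_interval=3 the high-level policy is queried on episode steps 0, 3, 6, ...
# while the low-level policy acts on every step using the most recent high-level
# action. The loop below is a hypothetical rollout skeleton: `env` is assumed to
# follow the usual gym reset/step interface and the low-level output is assumed to
# carry an `action` field; neither assumption comes from this file.
def _example_fixed_interval_rollout(agent, env, n_steps=9):
    agent.reset()
    obs = env.reset()
    for _ in range(n_steps):
        output = agent.act(obs)          # output.is_hl_step is True every 3rd step
        obs, reward, done, info = env.step(output.action)
        if done:
            break
# ---------------------------------------------------------------------------------
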
| 48.236152
| 122
| 0.65065
|
import os
import torch
import torch.nn as nn
import numpy as np
from contextlib import contextmanager
from functools import partial
from torch.optim import Adam, SGD
from spirl.utils.general_utils import ParamDict, get_clipped_optimizer, AttrDict, prefix_dict, map_dict, \
nan_hook, np2obj, ConstantSchedule
from spirl.utils.pytorch_utils import RAdam, remove_grads, map2np, map2torch
from spirl.utils.vis_utils import add_caption_to_img, add_captions_to_seq
from spirl.rl.components.normalization import DummyNormalizer
from spirl.rl.components.policy import Policy
from spirl.components.checkpointer import CheckpointHandler
from spirl.rl.utils.mpi import sync_grads
class BaseAgent(nn.Module):
def __init__(self, config):
super().__init__()
self._hp = self._default_hparams().overwrite(config)
self.device = self._hp.device
self._is_train = True
self._rand_act_mode = False
self._rollout_mode = False
self._obs_normalizer = self._hp.obs_normalizer(self._hp.obs_normalizer_params)
def _default_hparams(self):
default_dict = ParamDict({
'device': None,
'discount_factor': 0.99,
'optimizer': 'adam',
'gradient_clip': None,
'momentum': 0,
'adam_beta': 0.9,
'update_iterations': 1,
'target_network_update_factor': 5e-3,
'batch_size': 64,
'obs_normalizer': DummyNormalizer,
'obs_normalizer_params': {},
'obs_norm_log_groups': {},
'log_videos': True,
'log_video_caption': False,
'num_workers': None,
})
return default_dict
def act(self, obs):
if self._rand_act_mode:
return self._act_rand(obs)
else:
return self._act(obs)
def _act(self, obs):
raise NotImplementedError
def _act_rand(self, obs):
raise NotImplementedError
def update(self, experience_batch):
raise NotImplementedError
def add_experience(self, experience_batch):
print("### This agent does not support additional experience! ###")
def log_outputs(self, logging_stats, rollout_storage, logger, log_images, step):
logger.log_scalar_dict(logging_stats, prefix='train' if self._is_train else 'val', step=step)
if log_images:
assert rollout_storage is not None
if 'image' in rollout_storage and self._hp.log_videos:
if self._hp.log_video_caption:
vids = [np.stack(add_captions_to_seq(rollout.image, np2obj(rollout.info))).transpose(0, 3, 1, 2)
for rollout in rollout_storage.get()[-logger.n_logged_samples:]]
else:
vids = [np.stack(rollout.image).transpose(0, 3, 1, 2)
for rollout in rollout_storage.get()[-logger.n_logged_samples:]]
logger.log_videos(vids, name="rollouts", step=step)
self.visualize(logger, rollout_storage, step)
def visualize(self, logger, rollout_storage, step):
pass
def reset(self):
pass
def save_state(self, save_dir):
pass
def load_state(self, save_dir):
pass
def sync_networks(self):
raise NotImplementedError
def _soft_update_target_network(self, target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(self._hp.target_network_update_factor * param.data +
(1 - self._hp.target_network_update_factor) * target_param.data)
def _copy_to_target_network(self, target, source):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(source_param.data)
def _get_optimizer(self, optimizer, model, lr):
if optimizer == 'adam':
get_optim = partial(get_clipped_optimizer, optimizer_type=Adam, betas=(self._hp.adam_beta, 0.999))
elif optimizer == 'radam':
get_optim = partial(get_clipped_optimizer, optimizer_type=RAdam, betas=(self._hp.adam_beta, 0.999))
elif optimizer == 'sgd':
get_optim = partial(get_clipped_optimizer, optimizer_type=SGD, momentum=self._hp.momentum)
else:
raise ValueError("Optimizer '{}' not supported!".format(optimizer))
optim = partial(get_optim, gradient_clip=self._hp.gradient_clip)
return optim(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
def _perform_update(self, loss, opt, network):
nan_hook(loss)
opt.zero_grad()
loss.backward()
grads = [p.grad for p in network.parameters()]
nan_hook(grads)
opt.step()
def _get_obs_norm_info(self):
if isinstance(self._obs_normalizer, DummyNormalizer): return {}
mean, std = self._obs_normalizer.mean, self._obs_normalizer.std
if not self._hp.obs_norm_log_groups:
self._hp.obs_norm_log_groups = AttrDict(all=np.arange(mean.shape[0]))
info = {}
for group_key in self._hp.obs_norm_log_groups:
info['obs_norm_' + group_key + '_mean'] = mean[self._hp.obs_norm_log_groups[group_key]].mean()
info['obs_norm_' + group_key + '_std'] = std[self._hp.obs_norm_log_groups[group_key]].mean()
return info
@staticmethod
def load_model_weights(model, checkpoint, epoch='latest'):
checkpoint_dir = checkpoint if os.path.basename(checkpoint) == 'weights' \
else os.path.join(checkpoint, 'weights')
checkpoint_path = CheckpointHandler.get_resume_ckpt_file(epoch, checkpoint_dir)
CheckpointHandler.load_weights(checkpoint_path, model=model)
@staticmethod
def _remove_batch(d):
return map_dict(lambda x: x[0] if (isinstance(x, torch.Tensor) or
isinstance(x, np.ndarray)) else x, d)
@contextmanager
def val_mode(self):
self._is_train = False
self.call_children("switch_to_val", Policy)
yield
self._is_train = True
self.call_children("switch_to_train", Policy)
@contextmanager
def rand_act_mode(self):
self._rand_act_mode = True
yield
self._rand_act_mode = False
@contextmanager
def rollout_mode(self):
self._rollout_mode = True
self.call_children("switch_to_rollout", Policy)
yield
self._rollout_mode = False
self.call_children("switch_to_non_rollout", Policy)
def call_children(self, fn, cls):
def conditional_fn(module):
if isinstance(module, cls):
getattr(module, fn).__call__()
self.apply(conditional_fn)
class HierarchicalAgent(BaseAgent):
def __init__(self, config):
super().__init__(config)
self.hl_agent = self._hp.hl_agent(self._hp.overwrite(self._hp.hl_agent_params))
self.ll_agent = self._hp.ll_agent(self._hp.overwrite(self._hp.ll_agent_params))
self._last_hl_output = None
def _default_hparams(self):
default_dict = ParamDict({
'hl_agent': None,
'hl_agent_params': None,
'll_agent': None,
'll_agent_params': None,
'update_hl': True,
'update_ll': True,
'll_subgoal_reaching_reward': False,
'll_subgoal_reaching_reward_weight': 1e3,
})
return super()._default_hparams().overwrite(default_dict)
def act(self, obs):
obs_input = obs[None] if len(obs.shape) == 1 else obs
output = AttrDict()
if self._perform_hl_step_now:
self._last_hl_output = self.hl_agent.act(obs_input)
output.is_hl_step = True
if len(obs_input.shape) == 2 and len(self._last_hl_output.action.shape) == 1:
self._last_hl_output.action = self._last_hl_output.action[None]
self._last_hl_output.log_prob = self._last_hl_output.log_prob[None]
else:
output.is_hl_step = False
output.update(prefix_dict(self._last_hl_output, 'hl_'))
assert self._last_hl_output is not None
output.update(self.ll_agent.act(self.make_ll_obs(obs_input, self._last_hl_output.action)))
return self._remove_batch(output) if len(obs.shape) == 1 else output
def update(self, experience_batches):
assert isinstance(experience_batches, AttrDict)
update_outputs = AttrDict()
if self._hp.update_hl:
hl_update_outputs = self.hl_agent.update(experience_batches.hl_batch)
update_outputs.update(prefix_dict(hl_update_outputs, "hl_"))
if self._hp.update_ll:
ll_update_outputs = self.ll_agent.update(experience_batches.ll_batch)
update_outputs.update(ll_update_outputs)
return update_outputs
def log_outputs(self, logging_stats, rollout_storage, logger, log_images, step):
super().log_outputs(logging_stats, rollout_storage, logger, log_images, step)
if log_images:
self.hl_agent.visualize(logger, rollout_storage, step)
self.ll_agent.visualize(logger, rollout_storage, step)
def _act_rand(self, obs):
with self.hl_agent.rand_act_mode():
return self.act(obs)
def make_ll_obs(self, obs, hl_action):
return np.concatenate((obs, hl_action), axis=-1)
def add_experience(self, experience_batch):
self.hl_agent.add_experience(experience_batch.hl_batch)
self.ll_agent.add_experience(experience_batch.ll_batch)
def sync_networks(self):
self.hl_agent.sync_networks()
self.ll_agent.sync_networks()
def state_dict(self, *args, **kwargs):
return {'hl_agent': self.hl_agent.state_dict(*args, **kwargs),
'll_agent': self.ll_agent.state_dict(*args, **kwargs)}
def load_state_dict(self, state_dict, *args, **kwargs):
self.hl_agent.load_state_dict(state_dict.pop('hl_agent'), *args, **kwargs)
self.ll_agent.load_state_dict(state_dict.pop('ll_agent'), *args, **kwargs)
def save_state(self, save_dir):
self.hl_agent.save_state(os.path.join(save_dir, 'hl_agent'))
self.ll_agent.save_state(os.path.join(save_dir, 'll_agent'))
def load_state(self, save_dir):
self.hl_agent.load_state(os.path.join(save_dir, 'hl_agent'))
self.ll_agent.load_state(os.path.join(save_dir, 'll_agent'))
def reset(self):
super().reset()
self.hl_agent.reset()
self.ll_agent.reset()
@contextmanager
def rand_act_mode(self):
self._rand_act_mode = True
self.hl_agent._rand_act_mode = True
self.ll_agent._rand_act_mode = True
yield
self._rand_act_mode = False
self.hl_agent._rand_act_mode = False
self.ll_agent._rand_act_mode = False
@property
def _perform_hl_step_now(self):
raise NotImplementedError
class FixedIntervalHierarchicalAgent(HierarchicalAgent):
def __init__(self, config):
super().__init__(config)
self._steps_since_hl = 0
def _default_hparams(self):
default_dict = ParamDict({
'hl_interval': 3,
})
return super()._default_hparams().overwrite(default_dict)
def act(self, *args, **kwargs):
output = super().act(*args, **kwargs)
self._steps_since_hl += 1
return output
@property
def _perform_hl_step_now(self):
return self._steps_since_hl % self._hp.hl_interval == 0
def reset(self):
super().reset()
self._steps_since_hl = 0
| true
| true
|
f70576f56e1dace795c9e93cbc74e95d6940c629
| 9,236
|
py
|
Python
|
bot/core/conversation.py
|
lugodev/telegram-pocket-bot
|
ae9cbfc1aa14c3bd8dd292c477f69891d82d9d94
|
[
"MIT"
] | 1
|
2021-11-12T04:08:35.000Z
|
2021-11-12T04:08:35.000Z
|
bot/core/conversation.py
|
lugodev/telegram-pocket-bot
|
ae9cbfc1aa14c3bd8dd292c477f69891d82d9d94
|
[
"MIT"
] | null | null | null |
bot/core/conversation.py
|
lugodev/telegram-pocket-bot
|
ae9cbfc1aa14c3bd8dd292c477f69891d82d9d94
|
[
"MIT"
] | null | null | null |
import time
import emoji
from telegram import InlineKeyboardMarkup, ParseMode, InlineKeyboardButton
from telegram.ext import run_async, ConversationHandler
from telegram.error import TelegramError
from django.db.models import Q
from . import constants, authentication, renderers, models
def send_broadcast(admin, broadcast, context):
bot = context.bot
success = 0
errors = 0
for user in models.BotUser.objects.all():
try:
if user.language == 'es':
bot.send_message(
chat_id=user.chat_id,
text=broadcast.text_es
)
elif user.language == 'en':
bot.send_message(
chat_id=user.chat_id,
text=broadcast.text_en
)
success += 1
except Exception as e:
user.has_blocked_bot = True
user.save()
errors += 1
time.sleep(1)
broadcast.success = success
broadcast.errors = errors
broadcast.sent = True
broadcast.save()
bot.send_message(
chat_id=admin.chat_id,
text='Enviados: {}\nErrores: {}'.format(
success,
errors
),
)
@run_async
def feedback(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
text = ''
if user.language == 'es':
text = (
'¿Deseas enviar tu opinión para ayudarme a mejorar el bot?'
'\n\nPuedes reportar errores, solicitar nuevas funcionalidades o mejoras.\n\n'
'Envíame tu opinión o ejecuta /cancel.'
)
elif user.language == 'en':
text = (
'Do you want to send me your feedback to help me improve the bot?'
'\n\nYou can report bugs, request new features or improvements.\n\n'
'Send your feedback or execute /cancel.'
)
query.edit_message_text(
text=text
)
return constants.INPUT_FEEDBACK
@run_async
def input_feedback(update, context):
bot = context.bot
message = update.message.text
user = authentication.authenticate(update.effective_user)
_, keyboard = renderers.main_markup(user)
if str(message).lower() == '/cancel':
if user.language == 'es':
update.message.chat.send_message(
text='✅ Se canceló la acción que estabas llevando a cabo.',
reply_markup=InlineKeyboardMarkup(keyboard)
)
elif user.language == 'en':
update.message.chat.send_message(
text='✅ The action has been canceled.',
reply_markup=InlineKeyboardMarkup(keyboard)
)
else:
name = user.first_name
if user.last_name is not None:
name += ' ' + user.last_name
if user.username is not None:
name += '(@{})'.format(user.username)
text = (
'💬 Feedback from {name}:'
'\n\n{message}'.format(
name=name,
message=message
)
)
# persist feedback
models.Feedback.objects.create(
bot_user=user,
message=message
)
# send feedback to admins
admins = models.BotUser.objects.filter(is_admin=True)
for admin in admins:
bot.send_message(
chat_id=admin.chat_id,
text=text
)
# thanks
text = ''
if user.language == 'es':
text = 'Muchas gracias por tu opinión.'
elif user.language == 'en':
text = 'Thank you for your feedback.'
bot.send_message(
chat_id=user.chat_id,
text=text,
reply_markup=InlineKeyboardMarkup(keyboard)
)
return ConversationHandler.END
@run_async
def input_broadcast_message(update, context):
message = update.message.text
bot = context.bot
user = authentication.authenticate(update.effective_user)
try:
broadcast = models.Broadcast.objects.get(sent=False)
if broadcast.setting_lang == 'es':
broadcast.text_es = message
elif broadcast.setting_lang == 'en':
broadcast.text_en = message
broadcast.setting_lang = None
broadcast.save()
text, keyboard = renderers.broadcast_markup(user, context)
bot.send_message(
chat_id=user.chat_id,
text=text,
reply_markup=InlineKeyboardMarkup(keyboard)
)
return ConversationHandler.END
    except models.Broadcast.DoesNotExist:
return ConversationHandler.END
@run_async
def input_direct_message(update, context):
bot = context.bot
message = update.message.text
user = authentication.authenticate(update.effective_user)
if user.is_admin:
context.user_data['md_text'] = message
bot.send_message(
chat_id=user.chat_id,
text=message,
reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton(text='Enviar', callback_data='confirm_md')],
[InlineKeyboardButton(text='Cancelar', callback_data='cancel_md')],
])
)
return ConversationHandler.END
@run_async
def broadcast(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
params = query.data.split(' ')
operation = params[1]
value = None
if params.__len__() > 2:
value = params[2]
try:
broad = models.Broadcast.objects.get(sent=False)
if operation == 'lang':
broad.setting_lang = value
broad.save()
query.edit_message_text(
text='Envíame el mensaje en idioma "{}"'.format(value)
)
return constants.INPUT_BROADCAST_MESSAGE
if operation == 'send':
send_broadcast(
admin=user,
broadcast=broad,
context=context
)
return ConversationHandler.END
except models.Broadcast.DoesNotExist:
query.edit_message_text(
text='No hay ninguna notificación en curso, comienza una nueva.'
)
return ConversationHandler.END
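
# --- Illustrative sketch (added by the editor, not part of the original file) -----
# The handler above splits query.data on spaces: params[0] is the callback prefix
# produced by renderers.broadcast_markup (not shown in this file), params[1] the
# operation ('lang' or 'send') and params[2] an optional value. The hypothetical
# helper below only illustrates that payload shape; the real prefix is an assumption.
def _example_broadcast_callback_data(prefix):
    return ['{} lang es'.format(prefix), '{} lang en'.format(prefix), '{} send'.format(prefix)]
# ------------------------------------------------------------------------------------
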
@run_async
def direct_message(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
params = query.data.split(' ')
id = params[1]
if user.is_admin:
context.user_data['md_id'] = id
context.bot.send_message(
chat_id=user.chat_id,
text='🤖 Escribe el mensaje para enviar al usuario.'
)
return constants.INPUT_DIRECT_MESSAGE
@run_async
def send_direct_message(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
if user.is_admin:
bot = context.bot
id = context.user_data.get('md_id')
text = context.user_data.get('md_text')
try:
destiny_user = models.BotUser.objects.get(pk=id)
try:
bot.send_message(
chat_id=destiny_user.chat_id,
text=text
)
query.edit_message_text(
text='✅ Mensaje enviado.'
)
destiny_user.has_blocked_bot = False
destiny_user.save()
except:
destiny_user.has_blocked_bot = True
destiny_user.save()
bot.send_message(
chat_id=user.chat_id,
text='⚠️ No fue posible enviar el mensaje.'
)
except models.BotUser.DoesNotExist:
pass
del context.user_data['md_id']
del context.user_data['md_text']
@run_async
def cancel_direct_message(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
if user.is_admin:
bot = context.bot
query.edit_message_text(
text='✅ Envío cancelado.'
)
del context.user_data['md_id']
del context.user_data['md_text']
@run_async
def input_user_criteria(update, context):
bot = context.bot
message = update.message.text
user = authentication.authenticate(update.effective_user)
if user.is_admin:
criteria = message
users = models.BotUser.objects.filter(
Q(username__icontains=criteria) |
Q(first_name__icontains=criteria) |
Q(last_name__icontains=criteria)
)
bot.send_message(
chat_id=user.chat_id,
text='{} resultados'.format(users.count()),
)
for u in users:
text, keyboard = renderers.user_markup(u)
bot.send_message(
chat_id=user.chat_id,
text=text,
reply_markup=InlineKeyboardMarkup(keyboard)
)
return ConversationHandler.END
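
# --- Illustrative sketch (added by the editor, not part of the original file) -----
# One plausible way to wire the handlers above into python-telegram-bot. The
# callback-data pattern ('^feedback$') and the `dispatcher` argument are assumptions
# for illustration only; the real registration lives elsewhere in the bot.
def _example_register_feedback_conversation(dispatcher):
    from telegram.ext import CallbackQueryHandler, MessageHandler, Filters
    conversation = ConversationHandler(
        entry_points=[CallbackQueryHandler(feedback, pattern='^feedback$')],
        states={
            constants.INPUT_FEEDBACK: [MessageHandler(Filters.text, input_feedback)],
        },
        fallbacks=[],
    )
    dispatcher.add_handler(conversation)
# ------------------------------------------------------------------------------------
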
| 24.962162
| 90
| 0.581637
|
import time
import emoji
from telegram import InlineKeyboardMarkup, ParseMode, InlineKeyboardButton
from telegram.ext import run_async, ConversationHandler
from telegram.error import TelegramError
from django.db.models import Q
from . import constants, authentication, renderers, models
def send_broadcast(admin, broadcast, context):
bot = context.bot
success = 0
errors = 0
for user in models.BotUser.objects.all():
try:
if user.language == 'es':
bot.send_message(
chat_id=user.chat_id,
text=broadcast.text_es
)
elif user.language == 'en':
bot.send_message(
chat_id=user.chat_id,
text=broadcast.text_en
)
success += 1
except Exception as e:
user.has_blocked_bot = True
user.save()
errors += 1
time.sleep(1)
broadcast.success = success
broadcast.errors = errors
broadcast.sent = True
broadcast.save()
bot.send_message(
chat_id=admin.chat_id,
text='Enviados: {}\nErrores: {}'.format(
success,
errors
),
)
@run_async
def feedback(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
text = ''
if user.language == 'es':
text = (
'¿Deseas enviar tu opinión para ayudarme a mejorar el bot?'
'\n\nPuedes reportar errores, solicitar nuevas funcionalidades o mejoras.\n\n'
'Envíame tu opinión o ejecuta /cancel.'
)
elif user.language == 'en':
text = (
'Do you want to send me your feedback to help me improve the bot?'
'\n\nYou can report bugs, request new features or improvements.\n\n'
'Send your feedback or execute /cancel.'
)
query.edit_message_text(
text=text
)
return constants.INPUT_FEEDBACK
@run_async
def input_feedback(update, context):
bot = context.bot
message = update.message.text
user = authentication.authenticate(update.effective_user)
_, keyboard = renderers.main_markup(user)
if str(message).lower() == '/cancel':
if user.language == 'es':
update.message.chat.send_message(
text='✅ Se canceló la acción que estabas llevando a cabo.',
reply_markup=InlineKeyboardMarkup(keyboard)
)
elif user.language == 'en':
update.message.chat.send_message(
text='✅ The action has been canceled.',
reply_markup=InlineKeyboardMarkup(keyboard)
)
else:
name = user.first_name
if user.last_name is not None:
name += ' ' + user.last_name
if user.username is not None:
name += '(@{})'.format(user.username)
text = (
'💬 Feedback from {name}:'
'\n\n{message}'.format(
name=name,
message=message
)
)
models.Feedback.objects.create(
bot_user=user,
message=message
)
admins = models.BotUser.objects.filter(is_admin=True)
for admin in admins:
bot.send_message(
chat_id=admin.chat_id,
text=text
)
text = ''
if user.language == 'es':
text = 'Muchas gracias por tu opinión.'
elif user.language == 'en':
text = 'Thank you for your feedback.'
bot.send_message(
chat_id=user.chat_id,
text=text,
reply_markup=InlineKeyboardMarkup(keyboard)
)
return ConversationHandler.END
@run_async
def input_broadcast_message(update, context):
message = update.message.text
bot = context.bot
user = authentication.authenticate(update.effective_user)
try:
broadcast = models.Broadcast.objects.get(sent=False)
if broadcast.setting_lang == 'es':
broadcast.text_es = message
elif broadcast.setting_lang == 'en':
broadcast.text_en = message
broadcast.setting_lang = None
broadcast.save()
text, keyboard = renderers.broadcast_markup(user, context)
bot.send_message(
chat_id=user.chat_id,
text=text,
reply_markup=InlineKeyboardMarkup(keyboard)
)
return ConversationHandler.END
    except models.Broadcast.DoesNotExist:
return ConversationHandler.END
@run_async
def input_direct_message(update, context):
bot = context.bot
message = update.message.text
user = authentication.authenticate(update.effective_user)
if user.is_admin:
context.user_data['md_text'] = message
bot.send_message(
chat_id=user.chat_id,
text=message,
reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton(text='Enviar', callback_data='confirm_md')],
[InlineKeyboardButton(text='Cancelar', callback_data='cancel_md')],
])
)
return ConversationHandler.END
@run_async
def broadcast(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
params = query.data.split(' ')
operation = params[1]
value = None
if params.__len__() > 2:
value = params[2]
try:
broad = models.Broadcast.objects.get(sent=False)
if operation == 'lang':
broad.setting_lang = value
broad.save()
query.edit_message_text(
text='Envíame el mensaje en idioma "{}"'.format(value)
)
return constants.INPUT_BROADCAST_MESSAGE
if operation == 'send':
send_broadcast(
admin=user,
broadcast=broad,
context=context
)
return ConversationHandler.END
except models.Broadcast.DoesNotExist:
query.edit_message_text(
text='No hay ninguna notificación en curso, comienza una nueva.'
)
return ConversationHandler.END
@run_async
def direct_message(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
params = query.data.split(' ')
id = params[1]
if user.is_admin:
context.user_data['md_id'] = id
context.bot.send_message(
chat_id=user.chat_id,
text='🤖 Escribe el mensaje para enviar al usuario.'
)
return constants.INPUT_DIRECT_MESSAGE
@run_async
def send_direct_message(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
if user.is_admin:
bot = context.bot
id = context.user_data.get('md_id')
text = context.user_data.get('md_text')
try:
destiny_user = models.BotUser.objects.get(pk=id)
try:
bot.send_message(
chat_id=destiny_user.chat_id,
text=text
)
query.edit_message_text(
text='✅ Mensaje enviado.'
)
destiny_user.has_blocked_bot = False
destiny_user.save()
except:
destiny_user.has_blocked_bot = True
destiny_user.save()
bot.send_message(
chat_id=user.chat_id,
text='⚠️ No fue posible enviar el mensaje.'
)
except models.BotUser.DoesNotExist:
pass
del context.user_data['md_id']
del context.user_data['md_text']
@run_async
def cancel_direct_message(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
if user.is_admin:
bot = context.bot
query.edit_message_text(
text='✅ Envío cancelado.'
)
del context.user_data['md_id']
del context.user_data['md_text']
@run_async
def input_user_criteria(update, context):
bot = context.bot
message = update.message.text
user = authentication.authenticate(update.effective_user)
if user.is_admin:
criteria = message
users = models.BotUser.objects.filter(
Q(username__icontains=criteria) |
Q(first_name__icontains=criteria) |
Q(last_name__icontains=criteria)
)
bot.send_message(
chat_id=user.chat_id,
text='{} resultados'.format(users.count()),
)
for u in users:
text, keyboard = renderers.user_markup(u)
bot.send_message(
chat_id=user.chat_id,
text=text,
reply_markup=InlineKeyboardMarkup(keyboard)
)
return ConversationHandler.END
| true
| true
|
f705779e537fd0636da37fcab248e0fca545bfc5
| 10,249
|
py
|
Python
|
elasticapm/contrib/django/client.py
|
haider-zada96/apm_test
|
fa16fc30a055625abcde287073822cdbe979846c
|
[
"BSD-3-Clause"
] | 2
|
2019-02-15T20:23:39.000Z
|
2019-02-15T20:26:06.000Z
|
elasticapm/contrib/django/client.py
|
haider-zada96/apm_test
|
fa16fc30a055625abcde287073822cdbe979846c
|
[
"BSD-3-Clause"
] | null | null | null |
elasticapm/contrib/django/client.py
|
haider-zada96/apm_test
|
fa16fc30a055625abcde287073822cdbe979846c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
elasticapm.contrib.django.client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011-2017 Elasticsearch
Large portions are
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import django
from django.conf import settings as django_settings
from django.core.exceptions import DisallowedHost
from django.db import DatabaseError
from django.http import HttpRequest
from elasticapm.base import Client
from elasticapm.conf import constants
from elasticapm.contrib.django.utils import iterate_with_template_sources
from elasticapm.utils import compat, encoding, get_url_dict
from elasticapm.utils.module_import import import_string
from elasticapm.utils.wsgi import get_environ, get_headers
__all__ = ("DjangoClient",)
default_client_class = "elasticapm.contrib.django.DjangoClient"
_client = (None, None)
def get_client(client=None):
"""
Get an ElasticAPM client.
    :param client: optional dotted path to a client class; when omitted, the
        CLIENT entry of the ELASTIC_APM setting (or the default class) is used
    :return: a client instance (cached between calls unless an explicit path is given)
    :rtype: elasticapm.base.Client
"""
global _client
tmp_client = client is not None
if not tmp_client:
config = getattr(django_settings, "ELASTIC_APM", {})
client = config.get("CLIENT", default_client_class)
if _client[0] != client:
client_class = import_string(client)
instance = client_class()
if not tmp_client:
_client = (client, instance)
return instance
return _client[1]
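
# --- Illustrative sketch (added by the editor, not part of the original file) -----
# get_client() reads the CLIENT key of the ELASTIC_APM Django setting to decide
# which client class to instantiate. A hypothetical settings.py entry could look
# like this (SERVICE_NAME is an assumed example value; CLIENT shows the default):
#
#     ELASTIC_APM = {
#         "SERVICE_NAME": "my-service",
#         "CLIENT": "elasticapm.contrib.django.DjangoClient",
#     }
# ------------------------------------------------------------------------------------
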
class DjangoClient(Client):
logger = logging.getLogger("elasticapm.errors.client.django")
def __init__(self, config=None, **inline):
if config is None:
config = getattr(django_settings, "ELASTIC_APM", {})
if "framework_name" not in inline:
inline["framework_name"] = "django"
inline["framework_version"] = django.get_version()
super(DjangoClient, self).__init__(config, **inline)
def get_user_info(self, request):
user_info = {}
if not hasattr(request, "user"):
return user_info
try:
user = request.user
if hasattr(user, "is_authenticated"):
if callable(user.is_authenticated):
user_info["is_authenticated"] = user.is_authenticated()
else:
user_info["is_authenticated"] = bool(user.is_authenticated)
if hasattr(user, "id"):
user_info["id"] = encoding.keyword_field(user.id)
if hasattr(user, "get_username"):
user_info["username"] = encoding.keyword_field(user.get_username())
elif hasattr(user, "username"):
user_info["username"] = encoding.keyword_field(user.username)
if hasattr(user, "email"):
user_info["email"] = user.email
except DatabaseError:
# If the connection is closed or similar, we'll just skip this
return {}
return user_info
def get_data_from_request(self, request, capture_body=False):
result = {
"env": dict(get_environ(request.META)),
"headers": dict(get_headers(request.META)),
"method": request.method,
"socket": {"remote_address": request.META.get("REMOTE_ADDR"), "encrypted": request.is_secure()},
"cookies": dict(request.COOKIES),
}
if request.method in constants.HTTP_WITH_BODY:
content_type = request.META.get("CONTENT_TYPE")
if content_type == "application/x-www-form-urlencoded":
data = compat.multidict_to_dict(request.POST)
elif content_type and content_type.startswith("multipart/form-data"):
data = compat.multidict_to_dict(request.POST)
if request.FILES:
data["_files"] = {field: file.name for field, file in compat.iteritems(request.FILES)}
else:
try:
data = request.body
except Exception:
data = "<unavailable>"
result["body"] = data if (capture_body or not data) else "[REDACTED]"
if hasattr(request, "get_raw_uri"):
# added in Django 1.9
url = request.get_raw_uri()
else:
try:
# Requires host to be in ALLOWED_HOSTS, might throw a
# DisallowedHost exception
url = request.build_absolute_uri()
except DisallowedHost:
# We can't figure out the real URL, so we have to set it to
# DisallowedHost
result["url"] = {"full": "DisallowedHost"}
url = None
if url:
result["url"] = get_url_dict(url)
return result
def get_data_from_response(self, response):
result = {"status_code": response.status_code}
if hasattr(response, "items"):
result["headers"] = dict(response.items())
return result
def capture(self, event_type, request=None, **kwargs):
if "context" not in kwargs:
kwargs["context"] = context = {}
else:
context = kwargs["context"]
is_http_request = isinstance(request, HttpRequest)
if is_http_request:
context["request"] = self.get_data_from_request(
request, capture_body=self.config.capture_body in ("all", "errors")
)
context["user"] = self.get_user_info(request)
result = super(DjangoClient, self).capture(event_type, **kwargs)
if is_http_request:
# attach the elasticapm object to the request
request._elasticapm = {"service_name": self.config.service_name, "id": result}
return result
def _get_stack_info_for_trace(
self,
frames,
library_frame_context_lines=None,
in_app_frame_context_lines=None,
with_locals=True,
locals_processor_func=None,
):
"""If the stacktrace originates within the elasticapm module, it will skip
frames until some other module comes up."""
return list(
iterate_with_template_sources(
frames,
with_locals=with_locals,
library_frame_context_lines=library_frame_context_lines,
in_app_frame_context_lines=in_app_frame_context_lines,
include_paths_re=self.include_paths_re,
exclude_paths_re=self.exclude_paths_re,
locals_processor_func=locals_processor_func,
)
)
def send(self, url, **kwargs):
"""
Serializes and signs ``data`` and passes the payload off to ``send_remote``
If ``server`` was passed into the constructor, this will serialize the data and pipe it to
the server using ``send_remote()``.
"""
if self.config.server_url:
return super(DjangoClient, self).send(url, **kwargs)
else:
self.error_logger.error("No server configured, and elasticapm not installed. Cannot send message")
return None
class ProxyClient(object):
"""
A proxy which represents the current client at all times.
"""
# introspection support:
__members__ = property(lambda x: x.__dir__())
# Need to pretend to be the wrapped class, for the sake of objects that care
# about this (especially in equality tests)
__class__ = property(lambda x: get_client().__class__)
__dict__ = property(lambda o: get_client().__dict__)
    __repr__ = lambda x: repr(get_client())
__getattr__ = lambda x, o: getattr(get_client(), o)
__setattr__ = lambda x, o, v: setattr(get_client(), o, v)
__delattr__ = lambda x, o: delattr(get_client(), o)
__lt__ = lambda x, o: get_client() < o
__le__ = lambda x, o: get_client() <= o
__eq__ = lambda x, o: get_client() == o
__ne__ = lambda x, o: get_client() != o
__gt__ = lambda x, o: get_client() > o
__ge__ = lambda x, o: get_client() >= o
if compat.PY2:
__cmp__ = lambda x, o: cmp(get_client(), o) # noqa F821
__hash__ = lambda x: hash(get_client())
# attributes are currently not callable
# __call__ = lambda x, *a, **kw: get_client()(*a, **kw)
__nonzero__ = lambda x: bool(get_client())
__len__ = lambda x: len(get_client())
__getitem__ = lambda x, i: get_client()[i]
__iter__ = lambda x: iter(get_client())
__contains__ = lambda x, i: i in get_client()
__getslice__ = lambda x, i, j: get_client()[i:j]
__add__ = lambda x, o: get_client() + o
__sub__ = lambda x, o: get_client() - o
__mul__ = lambda x, o: get_client() * o
__floordiv__ = lambda x, o: get_client() // o
__mod__ = lambda x, o: get_client() % o
__divmod__ = lambda x, o: get_client().__divmod__(o)
__pow__ = lambda x, o: get_client() ** o
__lshift__ = lambda x, o: get_client() << o
__rshift__ = lambda x, o: get_client() >> o
__and__ = lambda x, o: get_client() & o
__xor__ = lambda x, o: get_client() ^ o
__or__ = lambda x, o: get_client() | o
__div__ = lambda x, o: get_client().__div__(o)
__truediv__ = lambda x, o: get_client().__truediv__(o)
__neg__ = lambda x: -(get_client())
__pos__ = lambda x: +(get_client())
__abs__ = lambda x: abs(get_client())
__invert__ = lambda x: ~(get_client())
__complex__ = lambda x: complex(get_client())
__int__ = lambda x: int(get_client())
if compat.PY2:
__long__ = lambda x: long(get_client()) # noqa F821
__float__ = lambda x: float(get_client())
__str__ = lambda x: str(get_client())
__unicode__ = lambda x: compat.text_type(get_client())
__oct__ = lambda x: oct(get_client())
__hex__ = lambda x: hex(get_client())
__index__ = lambda x: get_client().__index__()
__coerce__ = lambda x, o: x.__coerce__(x, o)
__enter__ = lambda x: x.__enter__()
__exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
client = ProxyClient()
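
# --- Illustrative sketch (added by the editor, not part of the original file) -----
# The module-level `client` proxy above forwards every attribute access to whatever
# get_client() currently returns, so the check below is True: the proxy reports the
# class of the underlying client thanks to the __class__ override in ProxyClient.
def _example_proxy_equivalence():
    real_client = get_client()
    return client.__class__ is real_client.__class__
# ------------------------------------------------------------------------------------
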
def _get_installed_apps_paths():
"""
Generate a list of modules in settings.INSTALLED_APPS.
"""
out = set()
for app in django_settings.INSTALLED_APPS:
out.add(app)
return out
| 36.088028
| 110
| 0.621524
|
from __future__ import absolute_import
import logging
import django
from django.conf import settings as django_settings
from django.core.exceptions import DisallowedHost
from django.db import DatabaseError
from django.http import HttpRequest
from elasticapm.base import Client
from elasticapm.conf import constants
from elasticapm.contrib.django.utils import iterate_with_template_sources
from elasticapm.utils import compat, encoding, get_url_dict
from elasticapm.utils.module_import import import_string
from elasticapm.utils.wsgi import get_environ, get_headers
__all__ = ("DjangoClient",)
default_client_class = "elasticapm.contrib.django.DjangoClient"
_client = (None, None)
def get_client(client=None):
global _client
tmp_client = client is not None
if not tmp_client:
config = getattr(django_settings, "ELASTIC_APM", {})
client = config.get("CLIENT", default_client_class)
if _client[0] != client:
client_class = import_string(client)
instance = client_class()
if not tmp_client:
_client = (client, instance)
return instance
return _client[1]
class DjangoClient(Client):
logger = logging.getLogger("elasticapm.errors.client.django")
def __init__(self, config=None, **inline):
if config is None:
config = getattr(django_settings, "ELASTIC_APM", {})
if "framework_name" not in inline:
inline["framework_name"] = "django"
inline["framework_version"] = django.get_version()
super(DjangoClient, self).__init__(config, **inline)
def get_user_info(self, request):
user_info = {}
if not hasattr(request, "user"):
return user_info
try:
user = request.user
if hasattr(user, "is_authenticated"):
if callable(user.is_authenticated):
user_info["is_authenticated"] = user.is_authenticated()
else:
user_info["is_authenticated"] = bool(user.is_authenticated)
if hasattr(user, "id"):
user_info["id"] = encoding.keyword_field(user.id)
if hasattr(user, "get_username"):
user_info["username"] = encoding.keyword_field(user.get_username())
elif hasattr(user, "username"):
user_info["username"] = encoding.keyword_field(user.username)
if hasattr(user, "email"):
user_info["email"] = user.email
except DatabaseError:
return {}
return user_info
def get_data_from_request(self, request, capture_body=False):
result = {
"env": dict(get_environ(request.META)),
"headers": dict(get_headers(request.META)),
"method": request.method,
"socket": {"remote_address": request.META.get("REMOTE_ADDR"), "encrypted": request.is_secure()},
"cookies": dict(request.COOKIES),
}
if request.method in constants.HTTP_WITH_BODY:
content_type = request.META.get("CONTENT_TYPE")
if content_type == "application/x-www-form-urlencoded":
data = compat.multidict_to_dict(request.POST)
elif content_type and content_type.startswith("multipart/form-data"):
data = compat.multidict_to_dict(request.POST)
if request.FILES:
data["_files"] = {field: file.name for field, file in compat.iteritems(request.FILES)}
else:
try:
data = request.body
except Exception:
data = "<unavailable>"
result["body"] = data if (capture_body or not data) else "[REDACTED]"
if hasattr(request, "get_raw_uri"):
# added in Django 1.9
url = request.get_raw_uri()
else:
try:
# Requires host to be in ALLOWED_HOSTS, might throw a
# DisallowedHost exception
url = request.build_absolute_uri()
except DisallowedHost:
# We can't figure out the real URL, so we have to set it to
result["url"] = {"full": "DisallowedHost"}
url = None
if url:
result["url"] = get_url_dict(url)
return result
def get_data_from_response(self, response):
result = {"status_code": response.status_code}
if hasattr(response, "items"):
result["headers"] = dict(response.items())
return result
def capture(self, event_type, request=None, **kwargs):
if "context" not in kwargs:
kwargs["context"] = context = {}
else:
context = kwargs["context"]
is_http_request = isinstance(request, HttpRequest)
if is_http_request:
context["request"] = self.get_data_from_request(
request, capture_body=self.config.capture_body in ("all", "errors")
)
context["user"] = self.get_user_info(request)
result = super(DjangoClient, self).capture(event_type, **kwargs)
if is_http_request:
request._elasticapm = {"service_name": self.config.service_name, "id": result}
return result
def _get_stack_info_for_trace(
self,
frames,
library_frame_context_lines=None,
in_app_frame_context_lines=None,
with_locals=True,
locals_processor_func=None,
):
return list(
iterate_with_template_sources(
frames,
with_locals=with_locals,
library_frame_context_lines=library_frame_context_lines,
in_app_frame_context_lines=in_app_frame_context_lines,
include_paths_re=self.include_paths_re,
exclude_paths_re=self.exclude_paths_re,
locals_processor_func=locals_processor_func,
)
)
def send(self, url, **kwargs):
if self.config.server_url:
return super(DjangoClient, self).send(url, **kwargs)
else:
self.error_logger.error("No server configured, and elasticapm not installed. Cannot send message")
return None
class ProxyClient(object):
__members__ = property(lambda x: x.__dir__())
__class__ = property(lambda x: get_client().__class__)
__dict__ = property(lambda o: get_client().__dict__)
    __repr__ = lambda x: repr(get_client())
__getattr__ = lambda x, o: getattr(get_client(), o)
__setattr__ = lambda x, o, v: setattr(get_client(), o, v)
__delattr__ = lambda x, o: delattr(get_client(), o)
__lt__ = lambda x, o: get_client() < o
__le__ = lambda x, o: get_client() <= o
__eq__ = lambda x, o: get_client() == o
__ne__ = lambda x, o: get_client() != o
__gt__ = lambda x, o: get_client() > o
__ge__ = lambda x, o: get_client() >= o
if compat.PY2:
__cmp__ = lambda x, o: cmp(get_client(), o)
__hash__ = lambda x: hash(get_client())
__nonzero__ = lambda x: bool(get_client())
__len__ = lambda x: len(get_client())
__getitem__ = lambda x, i: get_client()[i]
__iter__ = lambda x: iter(get_client())
__contains__ = lambda x, i: i in get_client()
__getslice__ = lambda x, i, j: get_client()[i:j]
__add__ = lambda x, o: get_client() + o
__sub__ = lambda x, o: get_client() - o
__mul__ = lambda x, o: get_client() * o
__floordiv__ = lambda x, o: get_client() // o
__mod__ = lambda x, o: get_client() % o
__divmod__ = lambda x, o: get_client().__divmod__(o)
__pow__ = lambda x, o: get_client() ** o
__lshift__ = lambda x, o: get_client() << o
__rshift__ = lambda x, o: get_client() >> o
__and__ = lambda x, o: get_client() & o
__xor__ = lambda x, o: get_client() ^ o
__or__ = lambda x, o: get_client() | o
__div__ = lambda x, o: get_client().__div__(o)
__truediv__ = lambda x, o: get_client().__truediv__(o)
__neg__ = lambda x: -(get_client())
__pos__ = lambda x: +(get_client())
__abs__ = lambda x: abs(get_client())
__invert__ = lambda x: ~(get_client())
__complex__ = lambda x: complex(get_client())
__int__ = lambda x: int(get_client())
if compat.PY2:
__long__ = lambda x: long(get_client())
__float__ = lambda x: float(get_client())
__str__ = lambda x: str(get_client())
__unicode__ = lambda x: compat.text_type(get_client())
__oct__ = lambda x: oct(get_client())
__hex__ = lambda x: hex(get_client())
__index__ = lambda x: get_client().__index__()
__coerce__ = lambda x, o: x.__coerce__(x, o)
__enter__ = lambda x: x.__enter__()
__exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
client = ProxyClient()
def _get_installed_apps_paths():
out = set()
for app in django_settings.INSTALLED_APPS:
out.add(app)
return out
| true
| true
|
f705783686a1d9d13021c5a0cc244a45478a4753
| 932
|
py
|
Python
|
setup.py
|
abhijitbendale/rls-lab
|
dedff01b9af01e06d0d6cd52df5532361cd893b1
|
[
"BSD-4-Clause"
] | null | null | null |
setup.py
|
abhijitbendale/rls-lab
|
dedff01b9af01e06d0d6cd52df5532361cd893b1
|
[
"BSD-4-Clause"
] | null | null | null |
setup.py
|
abhijitbendale/rls-lab
|
dedff01b9af01e06d0d6cd52df5532361cd893b1
|
[
"BSD-4-Clause"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup, Extension
import glob
import os
# Get matfiles and images for testing
matfiles=glob.glob(os.path.join('tests/data/*.mat'))
data=glob.glob(os.path.join('data/*'))
setup(
name='RLS',
version='1.0',
description='Python implementation of RLS program',
author='Abhijit Bendale',
author_email='bendale@mit.edu',
py_modules = ['rls_pipeline','tests.test_rlspackage',
'utils.linearRLS', 'utils.non_linear_rls',
'OptParserExtended'],
data_files = [('documentation',['documentation/notes.rst']),
('data', ['data/smp.mat']),
('tests/data', ['tests/data/smp.mat','tests/data/linear_rls.mat', 'tests/data/non_linear_rls.mat'])],
)
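
# --- Illustrative note (added by the editor, not part of the original file) --------
# Being a plain distutils script, this file is typically driven from the shell,
# e.g. `python setup.py install` or `python setup.py sdist`; no package-specific
# command-line options are assumed here.
# ------------------------------------------------------------------------------------
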
| 34.518519
| 208
| 0.531116
|
from distutils.core import setup, Extension
import glob
import os
matfiles=glob.glob(os.path.join('tests/data/*.mat'))
data=glob.glob(os.path.join('data/*'))
setup(
name='RLS',
version='1.0',
description='Python implementation of RLS program',
author='Abhijit Bendale',
author_email='bendale@mit.edu',
py_modules = ['rls_pipeline','tests.test_rlspackage',
'utils.linearRLS', 'utils.non_linear_rls',
'OptParserExtended'],
data_files = [('documentation',['documentation/notes.rst']),
('data', ['data/smp.mat']),
('tests/data', ['tests/data/smp.mat','tests/data/linear_rls.mat', 'tests/data/non_linear_rls.mat'])],
)
| true
| true
|
f705792361b745c5f11279f9c6b12a22432ba982
| 24,888
|
py
|
Python
|
alipay/aop/api/domain/ExSourceRateVO.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/ExSourceRateVO.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/ExSourceRateVO.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class ExSourceRateVO(object):
def __init__(self):
self._bid = None
self._currency_pair = None
self._currency_unit = None
self._expiry_time = None
self._extended_params = None
self._generate_date = None
self._generate_time = None
self._gmt_create = None
self._gmt_modified = None
self._guaranteed = None
self._id = None
self._inst = None
self._inst_rate_reference_id = None
self._is_exception = None
self._is_flat = None
self._is_formatted = None
self._is_valid = None
self._maturity_date = None
self._maximum_bid_amount = None
self._maximum_offer_amount = None
self._memo = None
self._mid = None
self._minimum_bid_amount = None
self._minimum_offer_amount = None
self._offer = None
self._on_off_shore = None
self._period = None
self._profile = None
self._quote_type = None
self._rate_method = None
self._rate_source_code = None
self._rate_type = None
self._segment_id = None
self._sp_bid = None
self._sp_mid = None
self._sp_offer = None
self._start_time = None
self._sub_inst = None
self._threshold_time = None
self._valid_time = None
self._zone_expiry_time = None
self._zone_generate_time = None
self._zone_gmt_create = None
self._zone_gmt_modified = None
self._zone_start_time = None
self._zone_threshold_time = None
self._zone_valid_time = None
@property
def bid(self):
return self._bid
@bid.setter
def bid(self, value):
self._bid = value
@property
def currency_pair(self):
return self._currency_pair
@currency_pair.setter
def currency_pair(self, value):
self._currency_pair = value
@property
def currency_unit(self):
return self._currency_unit
@currency_unit.setter
def currency_unit(self, value):
self._currency_unit = value
@property
def expiry_time(self):
return self._expiry_time
@expiry_time.setter
def expiry_time(self, value):
self._expiry_time = value
@property
def extended_params(self):
return self._extended_params
@extended_params.setter
def extended_params(self, value):
self._extended_params = value
@property
def generate_date(self):
return self._generate_date
@generate_date.setter
def generate_date(self, value):
self._generate_date = value
@property
def generate_time(self):
return self._generate_time
@generate_time.setter
def generate_time(self, value):
self._generate_time = value
@property
def gmt_create(self):
return self._gmt_create
@gmt_create.setter
def gmt_create(self, value):
self._gmt_create = value
@property
def gmt_modified(self):
return self._gmt_modified
@gmt_modified.setter
def gmt_modified(self, value):
self._gmt_modified = value
@property
def guaranteed(self):
return self._guaranteed
@guaranteed.setter
def guaranteed(self, value):
self._guaranteed = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def inst(self):
return self._inst
@inst.setter
def inst(self, value):
self._inst = value
@property
def inst_rate_reference_id(self):
return self._inst_rate_reference_id
@inst_rate_reference_id.setter
def inst_rate_reference_id(self, value):
self._inst_rate_reference_id = value
@property
def is_exception(self):
return self._is_exception
@is_exception.setter
def is_exception(self, value):
self._is_exception = value
@property
def is_flat(self):
return self._is_flat
@is_flat.setter
def is_flat(self, value):
self._is_flat = value
@property
def is_formatted(self):
return self._is_formatted
@is_formatted.setter
def is_formatted(self, value):
self._is_formatted = value
@property
def is_valid(self):
return self._is_valid
@is_valid.setter
def is_valid(self, value):
self._is_valid = value
@property
def maturity_date(self):
return self._maturity_date
@maturity_date.setter
def maturity_date(self, value):
self._maturity_date = value
@property
def maximum_bid_amount(self):
return self._maximum_bid_amount
@maximum_bid_amount.setter
def maximum_bid_amount(self, value):
self._maximum_bid_amount = value
@property
def maximum_offer_amount(self):
return self._maximum_offer_amount
@maximum_offer_amount.setter
def maximum_offer_amount(self, value):
self._maximum_offer_amount = value
@property
def memo(self):
return self._memo
@memo.setter
def memo(self, value):
self._memo = value
@property
def mid(self):
return self._mid
@mid.setter
def mid(self, value):
self._mid = value
@property
def minimum_bid_amount(self):
return self._minimum_bid_amount
@minimum_bid_amount.setter
def minimum_bid_amount(self, value):
self._minimum_bid_amount = value
@property
def minimum_offer_amount(self):
return self._minimum_offer_amount
@minimum_offer_amount.setter
def minimum_offer_amount(self, value):
self._minimum_offer_amount = value
@property
def offer(self):
return self._offer
@offer.setter
def offer(self, value):
self._offer = value
@property
def on_off_shore(self):
return self._on_off_shore
@on_off_shore.setter
def on_off_shore(self, value):
self._on_off_shore = value
@property
def period(self):
return self._period
@period.setter
def period(self, value):
self._period = value
@property
def profile(self):
return self._profile
@profile.setter
def profile(self, value):
self._profile = value
@property
def quote_type(self):
return self._quote_type
@quote_type.setter
def quote_type(self, value):
self._quote_type = value
@property
def rate_method(self):
return self._rate_method
@rate_method.setter
def rate_method(self, value):
self._rate_method = value
@property
def rate_source_code(self):
return self._rate_source_code
@rate_source_code.setter
def rate_source_code(self, value):
self._rate_source_code = value
@property
def rate_type(self):
return self._rate_type
@rate_type.setter
def rate_type(self, value):
self._rate_type = value
@property
def segment_id(self):
return self._segment_id
@segment_id.setter
def segment_id(self, value):
self._segment_id = value
@property
def sp_bid(self):
return self._sp_bid
@sp_bid.setter
def sp_bid(self, value):
self._sp_bid = value
@property
def sp_mid(self):
return self._sp_mid
@sp_mid.setter
def sp_mid(self, value):
self._sp_mid = value
@property
def sp_offer(self):
return self._sp_offer
@sp_offer.setter
def sp_offer(self, value):
self._sp_offer = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
@property
def sub_inst(self):
return self._sub_inst
@sub_inst.setter
def sub_inst(self, value):
self._sub_inst = value
@property
def threshold_time(self):
return self._threshold_time
@threshold_time.setter
def threshold_time(self, value):
self._threshold_time = value
@property
def valid_time(self):
return self._valid_time
@valid_time.setter
def valid_time(self, value):
self._valid_time = value
@property
def zone_expiry_time(self):
return self._zone_expiry_time
@zone_expiry_time.setter
def zone_expiry_time(self, value):
self._zone_expiry_time = value
@property
def zone_generate_time(self):
return self._zone_generate_time
@zone_generate_time.setter
def zone_generate_time(self, value):
self._zone_generate_time = value
@property
def zone_gmt_create(self):
return self._zone_gmt_create
@zone_gmt_create.setter
def zone_gmt_create(self, value):
self._zone_gmt_create = value
@property
def zone_gmt_modified(self):
return self._zone_gmt_modified
@zone_gmt_modified.setter
def zone_gmt_modified(self, value):
self._zone_gmt_modified = value
@property
def zone_start_time(self):
return self._zone_start_time
@zone_start_time.setter
def zone_start_time(self, value):
self._zone_start_time = value
@property
def zone_threshold_time(self):
return self._zone_threshold_time
@zone_threshold_time.setter
def zone_threshold_time(self, value):
self._zone_threshold_time = value
@property
def zone_valid_time(self):
return self._zone_valid_time
@zone_valid_time.setter
def zone_valid_time(self, value):
self._zone_valid_time = value
def to_alipay_dict(self):
params = dict()
if self.bid:
if hasattr(self.bid, 'to_alipay_dict'):
params['bid'] = self.bid.to_alipay_dict()
else:
params['bid'] = self.bid
if self.currency_pair:
if hasattr(self.currency_pair, 'to_alipay_dict'):
params['currency_pair'] = self.currency_pair.to_alipay_dict()
else:
params['currency_pair'] = self.currency_pair
if self.currency_unit:
if hasattr(self.currency_unit, 'to_alipay_dict'):
params['currency_unit'] = self.currency_unit.to_alipay_dict()
else:
params['currency_unit'] = self.currency_unit
if self.expiry_time:
if hasattr(self.expiry_time, 'to_alipay_dict'):
params['expiry_time'] = self.expiry_time.to_alipay_dict()
else:
params['expiry_time'] = self.expiry_time
if self.extended_params:
if hasattr(self.extended_params, 'to_alipay_dict'):
params['extended_params'] = self.extended_params.to_alipay_dict()
else:
params['extended_params'] = self.extended_params
if self.generate_date:
if hasattr(self.generate_date, 'to_alipay_dict'):
params['generate_date'] = self.generate_date.to_alipay_dict()
else:
params['generate_date'] = self.generate_date
if self.generate_time:
if hasattr(self.generate_time, 'to_alipay_dict'):
params['generate_time'] = self.generate_time.to_alipay_dict()
else:
params['generate_time'] = self.generate_time
if self.gmt_create:
if hasattr(self.gmt_create, 'to_alipay_dict'):
params['gmt_create'] = self.gmt_create.to_alipay_dict()
else:
params['gmt_create'] = self.gmt_create
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.guaranteed:
if hasattr(self.guaranteed, 'to_alipay_dict'):
params['guaranteed'] = self.guaranteed.to_alipay_dict()
else:
params['guaranteed'] = self.guaranteed
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.inst:
if hasattr(self.inst, 'to_alipay_dict'):
params['inst'] = self.inst.to_alipay_dict()
else:
params['inst'] = self.inst
if self.inst_rate_reference_id:
if hasattr(self.inst_rate_reference_id, 'to_alipay_dict'):
params['inst_rate_reference_id'] = self.inst_rate_reference_id.to_alipay_dict()
else:
params['inst_rate_reference_id'] = self.inst_rate_reference_id
if self.is_exception:
if hasattr(self.is_exception, 'to_alipay_dict'):
params['is_exception'] = self.is_exception.to_alipay_dict()
else:
params['is_exception'] = self.is_exception
if self.is_flat:
if hasattr(self.is_flat, 'to_alipay_dict'):
params['is_flat'] = self.is_flat.to_alipay_dict()
else:
params['is_flat'] = self.is_flat
if self.is_formatted:
if hasattr(self.is_formatted, 'to_alipay_dict'):
params['is_formatted'] = self.is_formatted.to_alipay_dict()
else:
params['is_formatted'] = self.is_formatted
if self.is_valid:
if hasattr(self.is_valid, 'to_alipay_dict'):
params['is_valid'] = self.is_valid.to_alipay_dict()
else:
params['is_valid'] = self.is_valid
if self.maturity_date:
if hasattr(self.maturity_date, 'to_alipay_dict'):
params['maturity_date'] = self.maturity_date.to_alipay_dict()
else:
params['maturity_date'] = self.maturity_date
if self.maximum_bid_amount:
if hasattr(self.maximum_bid_amount, 'to_alipay_dict'):
params['maximum_bid_amount'] = self.maximum_bid_amount.to_alipay_dict()
else:
params['maximum_bid_amount'] = self.maximum_bid_amount
if self.maximum_offer_amount:
if hasattr(self.maximum_offer_amount, 'to_alipay_dict'):
params['maximum_offer_amount'] = self.maximum_offer_amount.to_alipay_dict()
else:
params['maximum_offer_amount'] = self.maximum_offer_amount
if self.memo:
if hasattr(self.memo, 'to_alipay_dict'):
params['memo'] = self.memo.to_alipay_dict()
else:
params['memo'] = self.memo
if self.mid:
if hasattr(self.mid, 'to_alipay_dict'):
params['mid'] = self.mid.to_alipay_dict()
else:
params['mid'] = self.mid
if self.minimum_bid_amount:
if hasattr(self.minimum_bid_amount, 'to_alipay_dict'):
params['minimum_bid_amount'] = self.minimum_bid_amount.to_alipay_dict()
else:
params['minimum_bid_amount'] = self.minimum_bid_amount
if self.minimum_offer_amount:
if hasattr(self.minimum_offer_amount, 'to_alipay_dict'):
params['minimum_offer_amount'] = self.minimum_offer_amount.to_alipay_dict()
else:
params['minimum_offer_amount'] = self.minimum_offer_amount
if self.offer:
if hasattr(self.offer, 'to_alipay_dict'):
params['offer'] = self.offer.to_alipay_dict()
else:
params['offer'] = self.offer
if self.on_off_shore:
if hasattr(self.on_off_shore, 'to_alipay_dict'):
params['on_off_shore'] = self.on_off_shore.to_alipay_dict()
else:
params['on_off_shore'] = self.on_off_shore
if self.period:
if hasattr(self.period, 'to_alipay_dict'):
params['period'] = self.period.to_alipay_dict()
else:
params['period'] = self.period
if self.profile:
if hasattr(self.profile, 'to_alipay_dict'):
params['profile'] = self.profile.to_alipay_dict()
else:
params['profile'] = self.profile
if self.quote_type:
if hasattr(self.quote_type, 'to_alipay_dict'):
params['quote_type'] = self.quote_type.to_alipay_dict()
else:
params['quote_type'] = self.quote_type
if self.rate_method:
if hasattr(self.rate_method, 'to_alipay_dict'):
params['rate_method'] = self.rate_method.to_alipay_dict()
else:
params['rate_method'] = self.rate_method
if self.rate_source_code:
if hasattr(self.rate_source_code, 'to_alipay_dict'):
params['rate_source_code'] = self.rate_source_code.to_alipay_dict()
else:
params['rate_source_code'] = self.rate_source_code
if self.rate_type:
if hasattr(self.rate_type, 'to_alipay_dict'):
params['rate_type'] = self.rate_type.to_alipay_dict()
else:
params['rate_type'] = self.rate_type
if self.segment_id:
if hasattr(self.segment_id, 'to_alipay_dict'):
params['segment_id'] = self.segment_id.to_alipay_dict()
else:
params['segment_id'] = self.segment_id
if self.sp_bid:
if hasattr(self.sp_bid, 'to_alipay_dict'):
params['sp_bid'] = self.sp_bid.to_alipay_dict()
else:
params['sp_bid'] = self.sp_bid
if self.sp_mid:
if hasattr(self.sp_mid, 'to_alipay_dict'):
params['sp_mid'] = self.sp_mid.to_alipay_dict()
else:
params['sp_mid'] = self.sp_mid
if self.sp_offer:
if hasattr(self.sp_offer, 'to_alipay_dict'):
params['sp_offer'] = self.sp_offer.to_alipay_dict()
else:
params['sp_offer'] = self.sp_offer
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
if self.sub_inst:
if hasattr(self.sub_inst, 'to_alipay_dict'):
params['sub_inst'] = self.sub_inst.to_alipay_dict()
else:
params['sub_inst'] = self.sub_inst
if self.threshold_time:
if hasattr(self.threshold_time, 'to_alipay_dict'):
params['threshold_time'] = self.threshold_time.to_alipay_dict()
else:
params['threshold_time'] = self.threshold_time
if self.valid_time:
if hasattr(self.valid_time, 'to_alipay_dict'):
params['valid_time'] = self.valid_time.to_alipay_dict()
else:
params['valid_time'] = self.valid_time
if self.zone_expiry_time:
if hasattr(self.zone_expiry_time, 'to_alipay_dict'):
params['zone_expiry_time'] = self.zone_expiry_time.to_alipay_dict()
else:
params['zone_expiry_time'] = self.zone_expiry_time
if self.zone_generate_time:
if hasattr(self.zone_generate_time, 'to_alipay_dict'):
params['zone_generate_time'] = self.zone_generate_time.to_alipay_dict()
else:
params['zone_generate_time'] = self.zone_generate_time
if self.zone_gmt_create:
if hasattr(self.zone_gmt_create, 'to_alipay_dict'):
params['zone_gmt_create'] = self.zone_gmt_create.to_alipay_dict()
else:
params['zone_gmt_create'] = self.zone_gmt_create
if self.zone_gmt_modified:
if hasattr(self.zone_gmt_modified, 'to_alipay_dict'):
params['zone_gmt_modified'] = self.zone_gmt_modified.to_alipay_dict()
else:
params['zone_gmt_modified'] = self.zone_gmt_modified
if self.zone_start_time:
if hasattr(self.zone_start_time, 'to_alipay_dict'):
params['zone_start_time'] = self.zone_start_time.to_alipay_dict()
else:
params['zone_start_time'] = self.zone_start_time
if self.zone_threshold_time:
if hasattr(self.zone_threshold_time, 'to_alipay_dict'):
params['zone_threshold_time'] = self.zone_threshold_time.to_alipay_dict()
else:
params['zone_threshold_time'] = self.zone_threshold_time
if self.zone_valid_time:
if hasattr(self.zone_valid_time, 'to_alipay_dict'):
params['zone_valid_time'] = self.zone_valid_time.to_alipay_dict()
else:
params['zone_valid_time'] = self.zone_valid_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ExSourceRateVO()
if 'bid' in d:
o.bid = d['bid']
if 'currency_pair' in d:
o.currency_pair = d['currency_pair']
if 'currency_unit' in d:
o.currency_unit = d['currency_unit']
if 'expiry_time' in d:
o.expiry_time = d['expiry_time']
if 'extended_params' in d:
o.extended_params = d['extended_params']
if 'generate_date' in d:
o.generate_date = d['generate_date']
if 'generate_time' in d:
o.generate_time = d['generate_time']
if 'gmt_create' in d:
o.gmt_create = d['gmt_create']
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'guaranteed' in d:
o.guaranteed = d['guaranteed']
if 'id' in d:
o.id = d['id']
if 'inst' in d:
o.inst = d['inst']
if 'inst_rate_reference_id' in d:
o.inst_rate_reference_id = d['inst_rate_reference_id']
if 'is_exception' in d:
o.is_exception = d['is_exception']
if 'is_flat' in d:
o.is_flat = d['is_flat']
if 'is_formatted' in d:
o.is_formatted = d['is_formatted']
if 'is_valid' in d:
o.is_valid = d['is_valid']
if 'maturity_date' in d:
o.maturity_date = d['maturity_date']
if 'maximum_bid_amount' in d:
o.maximum_bid_amount = d['maximum_bid_amount']
if 'maximum_offer_amount' in d:
o.maximum_offer_amount = d['maximum_offer_amount']
if 'memo' in d:
o.memo = d['memo']
if 'mid' in d:
o.mid = d['mid']
if 'minimum_bid_amount' in d:
o.minimum_bid_amount = d['minimum_bid_amount']
if 'minimum_offer_amount' in d:
o.minimum_offer_amount = d['minimum_offer_amount']
if 'offer' in d:
o.offer = d['offer']
if 'on_off_shore' in d:
o.on_off_shore = d['on_off_shore']
if 'period' in d:
o.period = d['period']
if 'profile' in d:
o.profile = d['profile']
if 'quote_type' in d:
o.quote_type = d['quote_type']
if 'rate_method' in d:
o.rate_method = d['rate_method']
if 'rate_source_code' in d:
o.rate_source_code = d['rate_source_code']
if 'rate_type' in d:
o.rate_type = d['rate_type']
if 'segment_id' in d:
o.segment_id = d['segment_id']
if 'sp_bid' in d:
o.sp_bid = d['sp_bid']
if 'sp_mid' in d:
o.sp_mid = d['sp_mid']
if 'sp_offer' in d:
o.sp_offer = d['sp_offer']
if 'start_time' in d:
o.start_time = d['start_time']
if 'sub_inst' in d:
o.sub_inst = d['sub_inst']
if 'threshold_time' in d:
o.threshold_time = d['threshold_time']
if 'valid_time' in d:
o.valid_time = d['valid_time']
if 'zone_expiry_time' in d:
o.zone_expiry_time = d['zone_expiry_time']
if 'zone_generate_time' in d:
o.zone_generate_time = d['zone_generate_time']
if 'zone_gmt_create' in d:
o.zone_gmt_create = d['zone_gmt_create']
if 'zone_gmt_modified' in d:
o.zone_gmt_modified = d['zone_gmt_modified']
if 'zone_start_time' in d:
o.zone_start_time = d['zone_start_time']
if 'zone_threshold_time' in d:
o.zone_threshold_time = d['zone_threshold_time']
if 'zone_valid_time' in d:
o.zone_valid_time = d['zone_valid_time']
return o
| 34.046512
| 95
| 0.600932
|
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class ExSourceRateVO(object):
def __init__(self):
self._bid = None
self._currency_pair = None
self._currency_unit = None
self._expiry_time = None
self._extended_params = None
self._generate_date = None
self._generate_time = None
self._gmt_create = None
self._gmt_modified = None
self._guaranteed = None
self._id = None
self._inst = None
self._inst_rate_reference_id = None
self._is_exception = None
self._is_flat = None
self._is_formatted = None
self._is_valid = None
self._maturity_date = None
self._maximum_bid_amount = None
self._maximum_offer_amount = None
self._memo = None
self._mid = None
self._minimum_bid_amount = None
self._minimum_offer_amount = None
self._offer = None
self._on_off_shore = None
self._period = None
self._profile = None
self._quote_type = None
self._rate_method = None
self._rate_source_code = None
self._rate_type = None
self._segment_id = None
self._sp_bid = None
self._sp_mid = None
self._sp_offer = None
self._start_time = None
self._sub_inst = None
self._threshold_time = None
self._valid_time = None
self._zone_expiry_time = None
self._zone_generate_time = None
self._zone_gmt_create = None
self._zone_gmt_modified = None
self._zone_start_time = None
self._zone_threshold_time = None
self._zone_valid_time = None
@property
def bid(self):
return self._bid
@bid.setter
def bid(self, value):
self._bid = value
@property
def currency_pair(self):
return self._currency_pair
@currency_pair.setter
def currency_pair(self, value):
self._currency_pair = value
@property
def currency_unit(self):
return self._currency_unit
@currency_unit.setter
def currency_unit(self, value):
self._currency_unit = value
@property
def expiry_time(self):
return self._expiry_time
@expiry_time.setter
def expiry_time(self, value):
self._expiry_time = value
@property
def extended_params(self):
return self._extended_params
@extended_params.setter
def extended_params(self, value):
self._extended_params = value
@property
def generate_date(self):
return self._generate_date
@generate_date.setter
def generate_date(self, value):
self._generate_date = value
@property
def generate_time(self):
return self._generate_time
@generate_time.setter
def generate_time(self, value):
self._generate_time = value
@property
def gmt_create(self):
return self._gmt_create
@gmt_create.setter
def gmt_create(self, value):
self._gmt_create = value
@property
def gmt_modified(self):
return self._gmt_modified
@gmt_modified.setter
def gmt_modified(self, value):
self._gmt_modified = value
@property
def guaranteed(self):
return self._guaranteed
@guaranteed.setter
def guaranteed(self, value):
self._guaranteed = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def inst(self):
return self._inst
@inst.setter
def inst(self, value):
self._inst = value
@property
def inst_rate_reference_id(self):
return self._inst_rate_reference_id
@inst_rate_reference_id.setter
def inst_rate_reference_id(self, value):
self._inst_rate_reference_id = value
@property
def is_exception(self):
return self._is_exception
@is_exception.setter
def is_exception(self, value):
self._is_exception = value
@property
def is_flat(self):
return self._is_flat
@is_flat.setter
def is_flat(self, value):
self._is_flat = value
@property
def is_formatted(self):
return self._is_formatted
@is_formatted.setter
def is_formatted(self, value):
self._is_formatted = value
@property
def is_valid(self):
return self._is_valid
@is_valid.setter
def is_valid(self, value):
self._is_valid = value
@property
def maturity_date(self):
return self._maturity_date
@maturity_date.setter
def maturity_date(self, value):
self._maturity_date = value
@property
def maximum_bid_amount(self):
return self._maximum_bid_amount
@maximum_bid_amount.setter
def maximum_bid_amount(self, value):
self._maximum_bid_amount = value
@property
def maximum_offer_amount(self):
return self._maximum_offer_amount
@maximum_offer_amount.setter
def maximum_offer_amount(self, value):
self._maximum_offer_amount = value
@property
def memo(self):
return self._memo
@memo.setter
def memo(self, value):
self._memo = value
@property
def mid(self):
return self._mid
@mid.setter
def mid(self, value):
self._mid = value
@property
def minimum_bid_amount(self):
return self._minimum_bid_amount
@minimum_bid_amount.setter
def minimum_bid_amount(self, value):
self._minimum_bid_amount = value
@property
def minimum_offer_amount(self):
return self._minimum_offer_amount
@minimum_offer_amount.setter
def minimum_offer_amount(self, value):
self._minimum_offer_amount = value
@property
def offer(self):
return self._offer
@offer.setter
def offer(self, value):
self._offer = value
@property
def on_off_shore(self):
return self._on_off_shore
@on_off_shore.setter
def on_off_shore(self, value):
self._on_off_shore = value
@property
def period(self):
return self._period
@period.setter
def period(self, value):
self._period = value
@property
def profile(self):
return self._profile
@profile.setter
def profile(self, value):
self._profile = value
@property
def quote_type(self):
return self._quote_type
@quote_type.setter
def quote_type(self, value):
self._quote_type = value
@property
def rate_method(self):
return self._rate_method
@rate_method.setter
def rate_method(self, value):
self._rate_method = value
@property
def rate_source_code(self):
return self._rate_source_code
@rate_source_code.setter
def rate_source_code(self, value):
self._rate_source_code = value
@property
def rate_type(self):
return self._rate_type
@rate_type.setter
def rate_type(self, value):
self._rate_type = value
@property
def segment_id(self):
return self._segment_id
@segment_id.setter
def segment_id(self, value):
self._segment_id = value
@property
def sp_bid(self):
return self._sp_bid
@sp_bid.setter
def sp_bid(self, value):
self._sp_bid = value
@property
def sp_mid(self):
return self._sp_mid
@sp_mid.setter
def sp_mid(self, value):
self._sp_mid = value
@property
def sp_offer(self):
return self._sp_offer
@sp_offer.setter
def sp_offer(self, value):
self._sp_offer = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
@property
def sub_inst(self):
return self._sub_inst
@sub_inst.setter
def sub_inst(self, value):
self._sub_inst = value
@property
def threshold_time(self):
return self._threshold_time
@threshold_time.setter
def threshold_time(self, value):
self._threshold_time = value
@property
def valid_time(self):
return self._valid_time
@valid_time.setter
def valid_time(self, value):
self._valid_time = value
@property
def zone_expiry_time(self):
return self._zone_expiry_time
@zone_expiry_time.setter
def zone_expiry_time(self, value):
self._zone_expiry_time = value
@property
def zone_generate_time(self):
return self._zone_generate_time
@zone_generate_time.setter
def zone_generate_time(self, value):
self._zone_generate_time = value
@property
def zone_gmt_create(self):
return self._zone_gmt_create
@zone_gmt_create.setter
def zone_gmt_create(self, value):
self._zone_gmt_create = value
@property
def zone_gmt_modified(self):
return self._zone_gmt_modified
@zone_gmt_modified.setter
def zone_gmt_modified(self, value):
self._zone_gmt_modified = value
@property
def zone_start_time(self):
return self._zone_start_time
@zone_start_time.setter
def zone_start_time(self, value):
self._zone_start_time = value
@property
def zone_threshold_time(self):
return self._zone_threshold_time
@zone_threshold_time.setter
def zone_threshold_time(self, value):
self._zone_threshold_time = value
@property
def zone_valid_time(self):
return self._zone_valid_time
@zone_valid_time.setter
def zone_valid_time(self, value):
self._zone_valid_time = value
def to_alipay_dict(self):
params = dict()
if self.bid:
if hasattr(self.bid, 'to_alipay_dict'):
params['bid'] = self.bid.to_alipay_dict()
else:
params['bid'] = self.bid
if self.currency_pair:
if hasattr(self.currency_pair, 'to_alipay_dict'):
params['currency_pair'] = self.currency_pair.to_alipay_dict()
else:
params['currency_pair'] = self.currency_pair
if self.currency_unit:
if hasattr(self.currency_unit, 'to_alipay_dict'):
params['currency_unit'] = self.currency_unit.to_alipay_dict()
else:
params['currency_unit'] = self.currency_unit
if self.expiry_time:
if hasattr(self.expiry_time, 'to_alipay_dict'):
params['expiry_time'] = self.expiry_time.to_alipay_dict()
else:
params['expiry_time'] = self.expiry_time
if self.extended_params:
if hasattr(self.extended_params, 'to_alipay_dict'):
params['extended_params'] = self.extended_params.to_alipay_dict()
else:
params['extended_params'] = self.extended_params
if self.generate_date:
if hasattr(self.generate_date, 'to_alipay_dict'):
params['generate_date'] = self.generate_date.to_alipay_dict()
else:
params['generate_date'] = self.generate_date
if self.generate_time:
if hasattr(self.generate_time, 'to_alipay_dict'):
params['generate_time'] = self.generate_time.to_alipay_dict()
else:
params['generate_time'] = self.generate_time
if self.gmt_create:
if hasattr(self.gmt_create, 'to_alipay_dict'):
params['gmt_create'] = self.gmt_create.to_alipay_dict()
else:
params['gmt_create'] = self.gmt_create
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.guaranteed:
if hasattr(self.guaranteed, 'to_alipay_dict'):
params['guaranteed'] = self.guaranteed.to_alipay_dict()
else:
params['guaranteed'] = self.guaranteed
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.inst:
if hasattr(self.inst, 'to_alipay_dict'):
params['inst'] = self.inst.to_alipay_dict()
else:
params['inst'] = self.inst
if self.inst_rate_reference_id:
if hasattr(self.inst_rate_reference_id, 'to_alipay_dict'):
params['inst_rate_reference_id'] = self.inst_rate_reference_id.to_alipay_dict()
else:
params['inst_rate_reference_id'] = self.inst_rate_reference_id
if self.is_exception:
if hasattr(self.is_exception, 'to_alipay_dict'):
params['is_exception'] = self.is_exception.to_alipay_dict()
else:
params['is_exception'] = self.is_exception
if self.is_flat:
if hasattr(self.is_flat, 'to_alipay_dict'):
params['is_flat'] = self.is_flat.to_alipay_dict()
else:
params['is_flat'] = self.is_flat
if self.is_formatted:
if hasattr(self.is_formatted, 'to_alipay_dict'):
params['is_formatted'] = self.is_formatted.to_alipay_dict()
else:
params['is_formatted'] = self.is_formatted
if self.is_valid:
if hasattr(self.is_valid, 'to_alipay_dict'):
params['is_valid'] = self.is_valid.to_alipay_dict()
else:
params['is_valid'] = self.is_valid
if self.maturity_date:
if hasattr(self.maturity_date, 'to_alipay_dict'):
params['maturity_date'] = self.maturity_date.to_alipay_dict()
else:
params['maturity_date'] = self.maturity_date
if self.maximum_bid_amount:
if hasattr(self.maximum_bid_amount, 'to_alipay_dict'):
params['maximum_bid_amount'] = self.maximum_bid_amount.to_alipay_dict()
else:
params['maximum_bid_amount'] = self.maximum_bid_amount
if self.maximum_offer_amount:
if hasattr(self.maximum_offer_amount, 'to_alipay_dict'):
params['maximum_offer_amount'] = self.maximum_offer_amount.to_alipay_dict()
else:
params['maximum_offer_amount'] = self.maximum_offer_amount
if self.memo:
if hasattr(self.memo, 'to_alipay_dict'):
params['memo'] = self.memo.to_alipay_dict()
else:
params['memo'] = self.memo
if self.mid:
if hasattr(self.mid, 'to_alipay_dict'):
params['mid'] = self.mid.to_alipay_dict()
else:
params['mid'] = self.mid
if self.minimum_bid_amount:
if hasattr(self.minimum_bid_amount, 'to_alipay_dict'):
params['minimum_bid_amount'] = self.minimum_bid_amount.to_alipay_dict()
else:
params['minimum_bid_amount'] = self.minimum_bid_amount
if self.minimum_offer_amount:
if hasattr(self.minimum_offer_amount, 'to_alipay_dict'):
params['minimum_offer_amount'] = self.minimum_offer_amount.to_alipay_dict()
else:
params['minimum_offer_amount'] = self.minimum_offer_amount
if self.offer:
if hasattr(self.offer, 'to_alipay_dict'):
params['offer'] = self.offer.to_alipay_dict()
else:
params['offer'] = self.offer
if self.on_off_shore:
if hasattr(self.on_off_shore, 'to_alipay_dict'):
params['on_off_shore'] = self.on_off_shore.to_alipay_dict()
else:
params['on_off_shore'] = self.on_off_shore
if self.period:
if hasattr(self.period, 'to_alipay_dict'):
params['period'] = self.period.to_alipay_dict()
else:
params['period'] = self.period
if self.profile:
if hasattr(self.profile, 'to_alipay_dict'):
params['profile'] = self.profile.to_alipay_dict()
else:
params['profile'] = self.profile
if self.quote_type:
if hasattr(self.quote_type, 'to_alipay_dict'):
params['quote_type'] = self.quote_type.to_alipay_dict()
else:
params['quote_type'] = self.quote_type
if self.rate_method:
if hasattr(self.rate_method, 'to_alipay_dict'):
params['rate_method'] = self.rate_method.to_alipay_dict()
else:
params['rate_method'] = self.rate_method
if self.rate_source_code:
if hasattr(self.rate_source_code, 'to_alipay_dict'):
params['rate_source_code'] = self.rate_source_code.to_alipay_dict()
else:
params['rate_source_code'] = self.rate_source_code
if self.rate_type:
if hasattr(self.rate_type, 'to_alipay_dict'):
params['rate_type'] = self.rate_type.to_alipay_dict()
else:
params['rate_type'] = self.rate_type
if self.segment_id:
if hasattr(self.segment_id, 'to_alipay_dict'):
params['segment_id'] = self.segment_id.to_alipay_dict()
else:
params['segment_id'] = self.segment_id
if self.sp_bid:
if hasattr(self.sp_bid, 'to_alipay_dict'):
params['sp_bid'] = self.sp_bid.to_alipay_dict()
else:
params['sp_bid'] = self.sp_bid
if self.sp_mid:
if hasattr(self.sp_mid, 'to_alipay_dict'):
params['sp_mid'] = self.sp_mid.to_alipay_dict()
else:
params['sp_mid'] = self.sp_mid
if self.sp_offer:
if hasattr(self.sp_offer, 'to_alipay_dict'):
params['sp_offer'] = self.sp_offer.to_alipay_dict()
else:
params['sp_offer'] = self.sp_offer
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
if self.sub_inst:
if hasattr(self.sub_inst, 'to_alipay_dict'):
params['sub_inst'] = self.sub_inst.to_alipay_dict()
else:
params['sub_inst'] = self.sub_inst
if self.threshold_time:
if hasattr(self.threshold_time, 'to_alipay_dict'):
params['threshold_time'] = self.threshold_time.to_alipay_dict()
else:
params['threshold_time'] = self.threshold_time
if self.valid_time:
if hasattr(self.valid_time, 'to_alipay_dict'):
params['valid_time'] = self.valid_time.to_alipay_dict()
else:
params['valid_time'] = self.valid_time
if self.zone_expiry_time:
if hasattr(self.zone_expiry_time, 'to_alipay_dict'):
params['zone_expiry_time'] = self.zone_expiry_time.to_alipay_dict()
else:
params['zone_expiry_time'] = self.zone_expiry_time
if self.zone_generate_time:
if hasattr(self.zone_generate_time, 'to_alipay_dict'):
params['zone_generate_time'] = self.zone_generate_time.to_alipay_dict()
else:
params['zone_generate_time'] = self.zone_generate_time
if self.zone_gmt_create:
if hasattr(self.zone_gmt_create, 'to_alipay_dict'):
params['zone_gmt_create'] = self.zone_gmt_create.to_alipay_dict()
else:
params['zone_gmt_create'] = self.zone_gmt_create
if self.zone_gmt_modified:
if hasattr(self.zone_gmt_modified, 'to_alipay_dict'):
params['zone_gmt_modified'] = self.zone_gmt_modified.to_alipay_dict()
else:
params['zone_gmt_modified'] = self.zone_gmt_modified
if self.zone_start_time:
if hasattr(self.zone_start_time, 'to_alipay_dict'):
params['zone_start_time'] = self.zone_start_time.to_alipay_dict()
else:
params['zone_start_time'] = self.zone_start_time
if self.zone_threshold_time:
if hasattr(self.zone_threshold_time, 'to_alipay_dict'):
params['zone_threshold_time'] = self.zone_threshold_time.to_alipay_dict()
else:
params['zone_threshold_time'] = self.zone_threshold_time
if self.zone_valid_time:
if hasattr(self.zone_valid_time, 'to_alipay_dict'):
params['zone_valid_time'] = self.zone_valid_time.to_alipay_dict()
else:
params['zone_valid_time'] = self.zone_valid_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ExSourceRateVO()
if 'bid' in d:
o.bid = d['bid']
if 'currency_pair' in d:
o.currency_pair = d['currency_pair']
if 'currency_unit' in d:
o.currency_unit = d['currency_unit']
if 'expiry_time' in d:
o.expiry_time = d['expiry_time']
if 'extended_params' in d:
o.extended_params = d['extended_params']
if 'generate_date' in d:
o.generate_date = d['generate_date']
if 'generate_time' in d:
o.generate_time = d['generate_time']
if 'gmt_create' in d:
o.gmt_create = d['gmt_create']
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'guaranteed' in d:
o.guaranteed = d['guaranteed']
if 'id' in d:
o.id = d['id']
if 'inst' in d:
o.inst = d['inst']
if 'inst_rate_reference_id' in d:
o.inst_rate_reference_id = d['inst_rate_reference_id']
if 'is_exception' in d:
o.is_exception = d['is_exception']
if 'is_flat' in d:
o.is_flat = d['is_flat']
if 'is_formatted' in d:
o.is_formatted = d['is_formatted']
if 'is_valid' in d:
o.is_valid = d['is_valid']
if 'maturity_date' in d:
o.maturity_date = d['maturity_date']
if 'maximum_bid_amount' in d:
o.maximum_bid_amount = d['maximum_bid_amount']
if 'maximum_offer_amount' in d:
o.maximum_offer_amount = d['maximum_offer_amount']
if 'memo' in d:
o.memo = d['memo']
if 'mid' in d:
o.mid = d['mid']
if 'minimum_bid_amount' in d:
o.minimum_bid_amount = d['minimum_bid_amount']
if 'minimum_offer_amount' in d:
o.minimum_offer_amount = d['minimum_offer_amount']
if 'offer' in d:
o.offer = d['offer']
if 'on_off_shore' in d:
o.on_off_shore = d['on_off_shore']
if 'period' in d:
o.period = d['period']
if 'profile' in d:
o.profile = d['profile']
if 'quote_type' in d:
o.quote_type = d['quote_type']
if 'rate_method' in d:
o.rate_method = d['rate_method']
if 'rate_source_code' in d:
o.rate_source_code = d['rate_source_code']
if 'rate_type' in d:
o.rate_type = d['rate_type']
if 'segment_id' in d:
o.segment_id = d['segment_id']
if 'sp_bid' in d:
o.sp_bid = d['sp_bid']
if 'sp_mid' in d:
o.sp_mid = d['sp_mid']
if 'sp_offer' in d:
o.sp_offer = d['sp_offer']
if 'start_time' in d:
o.start_time = d['start_time']
if 'sub_inst' in d:
o.sub_inst = d['sub_inst']
if 'threshold_time' in d:
o.threshold_time = d['threshold_time']
if 'valid_time' in d:
o.valid_time = d['valid_time']
if 'zone_expiry_time' in d:
o.zone_expiry_time = d['zone_expiry_time']
if 'zone_generate_time' in d:
o.zone_generate_time = d['zone_generate_time']
if 'zone_gmt_create' in d:
o.zone_gmt_create = d['zone_gmt_create']
if 'zone_gmt_modified' in d:
o.zone_gmt_modified = d['zone_gmt_modified']
if 'zone_start_time' in d:
o.zone_start_time = d['zone_start_time']
if 'zone_threshold_time' in d:
o.zone_threshold_time = d['zone_threshold_time']
if 'zone_valid_time' in d:
o.zone_valid_time = d['zone_valid_time']
return o
| true
| true
|
f70579f8510e28a077bbf6b0f660e6af3d650613
| 2,017
|
py
|
Python
|
src/scs_dev/disk_volume.py
|
south-coast-science/scs_dev
|
b746adda020498b911cb92f28d4f07b14df996a2
|
[
"MIT"
] | 2
|
2017-04-24T14:58:28.000Z
|
2020-05-27T08:53:46.000Z
|
src/scs_dev/disk_volume.py
|
south-coast-science/scs_dev
|
b746adda020498b911cb92f28d4f07b14df996a2
|
[
"MIT"
] | 1
|
2020-07-13T14:33:59.000Z
|
2021-03-27T08:52:04.000Z
|
src/scs_dev/disk_volume.py
|
south-coast-science/scs_dev
|
b746adda020498b911cb92f28d4f07b14df996a2
|
[
"MIT"
] | 1
|
2018-08-24T09:55:01.000Z
|
2018-08-24T09:55:01.000Z
|
#!/usr/bin/env python3
"""
Created on 15 Oct 2020
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
DESCRIPTION
The disk_volume utility is used to determine whether a volume is mounted and, if so, the free and used space on
the volume. Space is given in blocks. The volume is identified by its mount point.
If the "is-available" field in the report is false, this indicates that an OS error occurred when
an attempt was made to access the volume. This error can occur if a removable medium failed, or
was disconnected without being unmounted.
The disk_volume utility is normally included in the commands accepted by the control_receiver utility.
SYNOPSIS
disk_volume.py [-v] MOUNTED_ON
EXAMPLES
./disk_volume.py -v /srv/SCS_logging
DOCUMENT EXAMPLE
{"filesystem": "/dev/mmcblk0p1", "size": 15384184, "used": 319296, "free": 14892092,
"mounted-on": "/srv/SCS_logging", "is-available": false}
SEE ALSO
scs_dev/disk_usage
"""
import sys
from scs_core.data.json import JSONify
from scs_dev.cmd.cmd_disk_volume import CmdDiskVolume
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
# ----------------------------------------------------------------------------------------------------------------
# cmd...
cmd = CmdDiskVolume()
if not cmd.is_valid():
cmd.print_help(sys.stderr)
exit(2)
if cmd.verbose:
print("disk_volume: %s" % cmd, file=sys.stderr)
# ----------------------------------------------------------------------------------------------------------------
# run...
volume = Host.disk_volume(cmd.mounted_on)
print(JSONify.dumps(volume))
# ----------------------------------------------------------------------------------------------------------------
# end...
if cmd.verbose and volume:
print("disk_volume: percent used: %s" % volume.percent_used(), file=sys.stderr)
| 28.814286
| 118
| 0.55181
|
import sys
from scs_core.data.json import JSONify
from scs_dev.cmd.cmd_disk_volume import CmdDiskVolume
from scs_host.sys.host import Host
if __name__ == '__main__':
cmd = CmdDiskVolume()
if not cmd.is_valid():
cmd.print_help(sys.stderr)
exit(2)
if cmd.verbose:
print("disk_volume: %s" % cmd, file=sys.stderr)
volume = Host.disk_volume(cmd.mounted_on)
print(JSONify.dumps(volume))
if cmd.verbose and volume:
print("disk_volume: percent used: %s" % volume.percent_used(), file=sys.stderr)
| true
| true
|
f7057aba528eede039765622b679f775d7e03025
| 1,685
|
py
|
Python
|
nova/tests/virt/test_images.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 1
|
2015-11-25T10:18:22.000Z
|
2015-11-25T10:18:22.000Z
|
nova/tests/virt/test_images.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 9
|
2015-05-20T11:20:17.000Z
|
2017-07-27T08:21:33.000Z
|
nova/tests/virt/test_images.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 13
|
2015-05-05T09:34:04.000Z
|
2017-11-08T02:03:46.000Z
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from nova import exception
from nova.openstack.common import processutils
from nova import test
from nova import utils
from nova.virt import images
class QemuTestCase(test.NoDBTestCase):
def test_qemu_info_with_bad_path(self):
self.assertRaises(exception.InvalidDiskInfo,
images.qemu_img_info,
'/path/that/does/not/exist')
@mock.patch.object(os.path, 'exists', return_value=True)
def test_qemu_info_with_errors(self, path_exists):
self.assertRaises(processutils.ProcessExecutionError,
images.qemu_img_info,
'/fake/path')
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(utils, 'execute',
return_value=('stdout', None))
def test_qemu_info_with_no_errors(self, path_exists,
utils_execute):
image_info = images.qemu_img_info('/fake/path')
self.assertTrue(image_info)
self.assertTrue(str(image_info))
| 37.444444
| 78
| 0.67003
|
import os
import mock
from nova import exception
from nova.openstack.common import processutils
from nova import test
from nova import utils
from nova.virt import images
class QemuTestCase(test.NoDBTestCase):
def test_qemu_info_with_bad_path(self):
self.assertRaises(exception.InvalidDiskInfo,
images.qemu_img_info,
'/path/that/does/not/exist')
@mock.patch.object(os.path, 'exists', return_value=True)
def test_qemu_info_with_errors(self, path_exists):
self.assertRaises(processutils.ProcessExecutionError,
images.qemu_img_info,
'/fake/path')
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(utils, 'execute',
return_value=('stdout', None))
def test_qemu_info_with_no_errors(self, path_exists,
utils_execute):
image_info = images.qemu_img_info('/fake/path')
self.assertTrue(image_info)
self.assertTrue(str(image_info))
| true
| true
|
f7057b8575bc4fd7fcbfbdc13379515dfc6d8dfe
| 635
|
py
|
Python
|
crawler/edge_image.py
|
Znmangosteen/cgan-face-generator
|
cb2912ad6dd3971af238a83e8d56fb3a43082893
|
[
"BSD-3-Clause"
] | 59
|
2017-10-15T03:59:06.000Z
|
2022-02-27T00:23:12.000Z
|
crawler/edge_image.py
|
Znmangosteen/cgan-face-generator
|
cb2912ad6dd3971af238a83e8d56fb3a43082893
|
[
"BSD-3-Clause"
] | 1
|
2019-08-27T09:05:26.000Z
|
2019-08-27T09:05:26.000Z
|
crawler/edge_image.py
|
Znmangosteen/cgan-face-generator
|
cb2912ad6dd3971af238a83e8d56fb3a43082893
|
[
"BSD-3-Clause"
] | 11
|
2017-12-15T18:23:29.000Z
|
2021-05-23T20:01:31.000Z
|
import cv2
import argparse
import numpy as np
def process_edge_image(input, output):
print('edge', input, output)
img = cv2.imread(input)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img, (3, 3), 0)
ret, thr = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)
edges = cv2.Canny(img, ret * 0.5, ret)
cv2.imwrite(output, 255 - edges)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', help='input image')
parser.add_argument('output', help='output image')
args = parser.parse_args()
process_edge_image(args.input, args.output)
| 27.608696
| 58
| 0.67874
|
import cv2
import argparse
import numpy as np
def process_edge_image(input, output):
print('edge', input, output)
img = cv2.imread(input)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img, (3, 3), 0)
ret, thr = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)
edges = cv2.Canny(img, ret * 0.5, ret)
cv2.imwrite(output, 255 - edges)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', help='input image')
parser.add_argument('output', help='output image')
args = parser.parse_args()
process_edge_image(args.input, args.output)
| true
| true
|
f7057d00a3ca443859a829101edfeaf4bf72956b
| 2,962
|
py
|
Python
|
src/config.py
|
sopuli/reporanka
|
a06994e23675f8de50fa878c532660532d14648c
|
[
"MIT"
] | 3
|
2021-11-24T15:39:36.000Z
|
2021-11-25T19:32:07.000Z
|
src/config.py
|
sopuli/reporanka
|
a06994e23675f8de50fa878c532660532d14648c
|
[
"MIT"
] | 12
|
2021-11-27T07:55:20.000Z
|
2021-12-12T23:56:23.000Z
|
src/config.py
|
sopuli/reporanka
|
a06994e23675f8de50fa878c532660532d14648c
|
[
"MIT"
] | 2
|
2021-11-26T14:16:34.000Z
|
2021-12-10T15:30:13.000Z
|
"""Module for specifying the environmental variables."""
import os
DIRNAME = os.path.dirname(__file__)
DB_NAME = "items.csv"
DB_PATH = os.path.join(DIRNAME, "data", DB_NAME)
TEST_DB = "test_items.csv"
TEST_DB_PATH = os.path.join(DIRNAME, "data", TEST_DB)
INSTRUCTIONS = (
"\nValitse toiminto"
"\n (1) lisää"
"\n (2) listaa"
"\n (3) poista"
"\n (4) hae tarkemmat tiedot id:llä"
"\n (5) hae vinkkejä hakusanalla"
"\n (9) poista kaikki vinkit"
"\n (0) lopeta\n")
ADD_MENU = (
"\nMitä lisätään?"
"\n (1) kirja"
"\n (2) video"
"\n (3) blogi"
"\n (4) takaisin valikkoon"
"\n (0) lopeta")
CMD_PROMPTS = {
"book": [("Kirjailijan/kirjailijoiden nimet: ", "Kirjailijan nimi on lisättävä!"),
("Kirjan nimi: ", "Kirjan nimi on lisättävä!"),
("Julkaisuvuosi: ", "Julkaisuvuosi ei ole kelvollinen!")
],
"video": [("Videon tekijä: ", "Videon tekijä on lisättävä!"),
("Videon nimi: ", "Videon nimi on lisättävä!"),
("Videon osoite: ", "Videon osoite on lisättävä!"),
("Videon julkaisupäivä: ", "Videon julkaisupäivä on lisättävä!")
],
"blog": [("Blogin kirjoittaja: ", "Blogin kirjoittaja on lisättävä!"),
("Blogin nimi: ", "Blogin nimi on lisättävä!"),
("Postaus: ", "Postauksen nimi on lisättävä!"),
("Blogin osoite: ", "Blogin osoite on lisättävä!"),
("Postauksen julkaisupäivä: ", "Postauksen julkaisupäivä on lisättävä!")
],
"delete": [("\nAnna poistettavan teoksen id: ", "Teoksen id on annettava!")
],
"search":[("Syötä hakusana: ", "Kirjoita hakusana!")],
"details": [("\nAnna id: ", "ID on annettava!")],
"clear": [("\nPoistetaan kaikki vinkit.", "Ai etkö haluakaan poistaa?")]
}
OUTPUTS = {
"already in list": "\nLukuvinkki on jo tallennettu aiemmin!",
"added": "\nUusi lukuvinkki lisätty.",
"empty list": "Sovellukseen ei ole tallennettu vinkkejä :(",
"choice": "\nValinta: ",
"list": "\nTallennetut vinkit:\n",
"item not found": "Teosta ei löytynyt.",
"confirm": "\nOletko varma (K/E)? ",
"deleting": "Poistetaan vinkki...",
"not deleted": "Vinkkiä ei poistettu.",
"unknown command": "Komentoa ei löytynyt, yritä uudelleen.",
"quit": "Kiitti & moi!",
"creator": "tekijä",
"author": "kirjailija",
"id": "id",
"name": "nimi",
"details results": "\nVinkin tarkemmat tiedot:\n",
"search results": "\nHakusanalla löytyvät vinkit:\n",
"search help": "\nVoit etsiä vinkkiä tekijän ja nimen perusteella syöttämällä hakusanan",
"broken input": "Syötteessäsi on ongelma.",
"confirm_clearing": "\nPoistetaanko ihan kaikki? (K/E) ",
"clearing": "Poistetaan kaikkia vinkkejä. Hyvästi!",
"not cleared": "Vinkkejä ei poistettu."
}
TITLE = "\nLUKUVINKKIKIRJASTO"
HEADERS = ['type', 'id', 'creator', 'title']
YES = 'K'
NO = 'E'
| 35.686747
| 93
| 0.595881
|
import os
DIRNAME = os.path.dirname(__file__)
DB_NAME = "items.csv"
DB_PATH = os.path.join(DIRNAME, "data", DB_NAME)
TEST_DB = "test_items.csv"
TEST_DB_PATH = os.path.join(DIRNAME, "data", TEST_DB)
INSTRUCTIONS = (
"\nValitse toiminto"
"\n (1) lisää"
"\n (2) listaa"
"\n (3) poista"
"\n (4) hae tarkemmat tiedot id:llä"
"\n (5) hae vinkkejä hakusanalla"
"\n (9) poista kaikki vinkit"
"\n (0) lopeta\n")
ADD_MENU = (
"\nMitä lisätään?"
"\n (1) kirja"
"\n (2) video"
"\n (3) blogi"
"\n (4) takaisin valikkoon"
"\n (0) lopeta")
CMD_PROMPTS = {
"book": [("Kirjailijan/kirjailijoiden nimet: ", "Kirjailijan nimi on lisättävä!"),
("Kirjan nimi: ", "Kirjan nimi on lisättävä!"),
("Julkaisuvuosi: ", "Julkaisuvuosi ei ole kelvollinen!")
],
"video": [("Videon tekijä: ", "Videon tekijä on lisättävä!"),
("Videon nimi: ", "Videon nimi on lisättävä!"),
("Videon osoite: ", "Videon osoite on lisättävä!"),
("Videon julkaisupäivä: ", "Videon julkaisupäivä on lisättävä!")
],
"blog": [("Blogin kirjoittaja: ", "Blogin kirjoittaja on lisättävä!"),
("Blogin nimi: ", "Blogin nimi on lisättävä!"),
("Postaus: ", "Postauksen nimi on lisättävä!"),
("Blogin osoite: ", "Blogin osoite on lisättävä!"),
("Postauksen julkaisupäivä: ", "Postauksen julkaisupäivä on lisättävä!")
],
"delete": [("\nAnna poistettavan teoksen id: ", "Teoksen id on annettava!")
],
"search":[("Syötä hakusana: ", "Kirjoita hakusana!")],
"details": [("\nAnna id: ", "ID on annettava!")],
"clear": [("\nPoistetaan kaikki vinkit.", "Ai etkö haluakaan poistaa?")]
}
OUTPUTS = {
"already in list": "\nLukuvinkki on jo tallennettu aiemmin!",
"added": "\nUusi lukuvinkki lisätty.",
"empty list": "Sovellukseen ei ole tallennettu vinkkejä :(",
"choice": "\nValinta: ",
"list": "\nTallennetut vinkit:\n",
"item not found": "Teosta ei löytynyt.",
"confirm": "\nOletko varma (K/E)? ",
"deleting": "Poistetaan vinkki...",
"not deleted": "Vinkkiä ei poistettu.",
"unknown command": "Komentoa ei löytynyt, yritä uudelleen.",
"quit": "Kiitti & moi!",
"creator": "tekijä",
"author": "kirjailija",
"id": "id",
"name": "nimi",
"details results": "\nVinkin tarkemmat tiedot:\n",
"search results": "\nHakusanalla löytyvät vinkit:\n",
"search help": "\nVoit etsiä vinkkiä tekijän ja nimen perusteella syöttämällä hakusanan",
"broken input": "Syötteessäsi on ongelma.",
"confirm_clearing": "\nPoistetaanko ihan kaikki? (K/E) ",
"clearing": "Poistetaan kaikkia vinkkejä. Hyvästi!",
"not cleared": "Vinkkejä ei poistettu."
}
TITLE = "\nLUKUVINKKIKIRJASTO"
HEADERS = ['type', 'id', 'creator', 'title']
YES = 'K'
NO = 'E'
| true
| true
|
f7057e752fa60f9a70ee24a03a508261e1d4ed2f
| 19,887
|
py
|
Python
|
hummingbot/client/hummingbot_application.py
|
Loopring/hummingbot-deprecated
|
43be8574ed9efd405aeee13a34c7a87ee732c7aa
|
[
"Apache-2.0"
] | null | null | null |
hummingbot/client/hummingbot_application.py
|
Loopring/hummingbot-deprecated
|
43be8574ed9efd405aeee13a34c7a87ee732c7aa
|
[
"Apache-2.0"
] | null | null | null |
hummingbot/client/hummingbot_application.py
|
Loopring/hummingbot-deprecated
|
43be8574ed9efd405aeee13a34c7a87ee732c7aa
|
[
"Apache-2.0"
] | 1
|
2021-11-23T19:59:17.000Z
|
2021-11-23T19:59:17.000Z
|
#!/usr/bin/env python
import asyncio
from collections import deque
import logging
import time
from typing import List, Dict, Optional, Tuple, Set, Deque
from hummingbot.client.command import __all__ as commands
from hummingbot.core.clock import Clock
from hummingbot.core.data_type.order_book_tracker import OrderBookTrackerDataSourceType
from hummingbot.core.data_type.user_stream_tracker import UserStreamTrackerDataSourceType
from hummingbot.logger import HummingbotLogger
from hummingbot.logger.application_warning import ApplicationWarning
from hummingbot.market.binance.binance_market import BinanceMarket
from hummingbot.market.bittrex.bittrex_market import BittrexMarket
from hummingbot.market.kucoin.kucoin_market import KucoinMarket
from hummingbot.market.coinbase_pro.coinbase_pro_market import CoinbaseProMarket
from hummingbot.market.huobi.huobi_market import HuobiMarket
from hummingbot.market.liquid.liquid_market import LiquidMarket
from hummingbot.market.market_base import MarketBase
from hummingbot.market.paper_trade import create_paper_trade_market
from hummingbot.market.radar_relay.radar_relay_market import RadarRelayMarket
from hummingbot.market.bamboo_relay.bamboo_relay_market import BambooRelayMarket
from hummingbot.market.dolomite.dolomite_market import DolomiteMarket
from hummingbot.market.loopring.loopring_market import LoopringMarket
from hummingbot.market.bitcoin_com.bitcoin_com_market import BitcoinComMarket
from hummingbot.market.kraken.kraken_market import KrakenMarket
from hummingbot.model.sql_connection_manager import SQLConnectionManager
from hummingbot.wallet.ethereum.ethereum_chain import EthereumChain
from hummingbot.wallet.ethereum.web3_wallet import Web3Wallet
from hummingbot.client.ui.keybindings import load_key_bindings
from hummingbot.client.ui.parser import load_parser, ThrowingArgumentParser
from hummingbot.client.ui.hummingbot_cli import HummingbotCLI
from hummingbot.client.ui.completer import load_completer
from hummingbot.client.errors import InvalidCommandError, ArgumentParserError
from hummingbot.client.config.global_config_map import global_config_map, using_wallet
from hummingbot.client.config.config_helpers import get_erc20_token_addresses, get_strategy_config_map
from hummingbot.strategy.strategy_base import StrategyBase
from hummingbot.strategy.cross_exchange_market_making import CrossExchangeMarketPair
from hummingbot.core.utils.kill_switch import KillSwitch
from hummingbot.data_feed.data_feed_base import DataFeedBase
from hummingbot.notifier.notifier_base import NotifierBase
from hummingbot.notifier.telegram_notifier import TelegramNotifier
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.market.markets_recorder import MarketsRecorder
from hummingbot.client.config.security import Security
s_logger = None
MARKET_CLASSES = {
"bamboo_relay": BambooRelayMarket,
"binance": BinanceMarket,
"coinbase_pro": CoinbaseProMarket,
"huobi": HuobiMarket,
"liquid": LiquidMarket,
"radar_relay": RadarRelayMarket,
"dolomite": DolomiteMarket,
"loopring": LoopringMarket,
"bittrex": BittrexMarket,
"kucoin": KucoinMarket,
"bitcoin_com": BitcoinComMarket,
"kraken": KrakenMarket,
}
class HummingbotApplication(*commands):
KILL_TIMEOUT = 10.0
APP_WARNING_EXPIRY_DURATION = 3600.0
APP_WARNING_STATUS_LIMIT = 6
_main_app: Optional["HummingbotApplication"] = None
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
@classmethod
def main_application(cls) -> "HummingbotApplication":
if cls._main_app is None:
cls._main_app = HummingbotApplication()
return cls._main_app
def __init__(self):
self.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
self.parser: ThrowingArgumentParser = load_parser(self)
self.app = HummingbotCLI(
input_handler=self._handle_command, bindings=load_key_bindings(self), completer=load_completer(self)
)
self.markets: Dict[str, MarketBase] = {}
self.wallet: Optional[Web3Wallet] = None
# strategy file name and name get assigned value after import or create command
self.strategy_file_name: str = None
self.strategy_name: str = None
self.strategy_task: Optional[asyncio.Task] = None
self.strategy: Optional[StrategyBase] = None
self.market_pair: Optional[CrossExchangeMarketPair] = None
self.market_trading_pair_tuples: List[MarketTradingPairTuple] = []
self.clock: Optional[Clock] = None
self.init_time: int = int(time.time() * 1e3)
self.start_time: Optional[int] = None
self.assets: Optional[Set[str]] = set()
self.starting_balances = {}
self.placeholder_mode = False
self.log_queue_listener: Optional[logging.handlers.QueueListener] = None
self.data_feed: Optional[DataFeedBase] = None
self.notifiers: List[NotifierBase] = []
self.kill_switch: Optional[KillSwitch] = None
self._app_warnings: Deque[ApplicationWarning] = deque()
self._trading_required: bool = True
self.trade_fill_db: SQLConnectionManager = SQLConnectionManager.get_trade_fills_instance()
self.markets_recorder: Optional[MarketsRecorder] = None
@property
def strategy_config_map(self):
if self.strategy_name is not None:
return get_strategy_config_map(self.strategy_name)
return None
def _notify(self, msg: str):
self.app.log(msg)
for notifier in self.notifiers:
notifier.add_msg_to_queue(msg)
def _handle_command(self, raw_command: str):
raw_command = raw_command.lower().strip()
try:
if self.placeholder_mode:
pass
else:
args = self.parser.parse_args(args=raw_command.split())
kwargs = vars(args)
if not hasattr(args, "func"):
return
f = args.func
del kwargs["func"]
f(**kwargs)
except InvalidCommandError as e:
self._notify("Invalid command: %s" % (str(e),))
except ArgumentParserError as e:
self._notify(str(e))
except NotImplementedError:
self._notify("Command not yet implemented. This feature is currently under development.")
except Exception as e:
self.logger().error(e, exc_info=True)
async def _cancel_outstanding_orders(self) -> bool:
success = True
try:
on_chain_cancel_on_exit = global_config_map.get("on_chain_cancel_on_exit").value
bamboo_relay_use_coordinator = global_config_map.get("bamboo_relay_use_coordinator").value
kill_timeout: float = self.KILL_TIMEOUT
self._notify("Cancelling outstanding orders...")
for market_name, market in self.markets.items():
# By default, the bot does not cancel orders on exit on Radar Relay or Bamboo Relay,
# since all open orders will expire in a short window
if not on_chain_cancel_on_exit and (market_name == "radar_relay" or (market_name == "bamboo_relay" and not bamboo_relay_use_coordinator)):
continue
cancellation_results = await market.cancel_all(kill_timeout)
uncancelled = list(filter(lambda cr: cr.success is False, cancellation_results))
if len(uncancelled) > 0:
success = False
uncancelled_order_ids = list(map(lambda cr: cr.order_id, uncancelled))
self._notify("\nFailed to cancel the following orders on %s:\n%s" % (
market_name,
'\n'.join(uncancelled_order_ids)
))
except Exception:
self.logger().error(f"Error canceling outstanding orders.", exc_info=True)
success = False
if success:
self._notify("All outstanding orders cancelled.")
return success
async def run(self):
await self.app.run()
def add_application_warning(self, app_warning: ApplicationWarning):
self._expire_old_application_warnings()
self._app_warnings.append(app_warning)
def clear_application_warning(self):
self._app_warnings.clear()
@staticmethod
def _initialize_market_assets(market_name: str, trading_pairs: List[str]) -> List[Tuple[str, str]]:
market_class: MarketBase = MARKET_CLASSES.get(market_name, MarketBase)
market_trading_pairs: List[Tuple[str, str]] = [market_class.split_trading_pair(trading_pair) for trading_pair in trading_pairs]
return market_trading_pairs
@staticmethod
def _convert_to_exchange_trading_pair(market_name: str, hb_trading_pair: List[str]) -> List[str]:
market_class: MarketBase = MARKET_CLASSES.get(market_name, MarketBase)
return [market_class.convert_to_exchange_trading_pair(trading_pair) for trading_pair in hb_trading_pair]
def _initialize_wallet(self, token_trading_pairs: List[str]):
if not using_wallet():
return
ethereum_wallet = global_config_map.get("ethereum_wallet").value
private_key = Security._private_keys[ethereum_wallet]
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
erc20_token_addresses = get_erc20_token_addresses(token_trading_pairs)
chain_name: str = global_config_map.get("ethereum_chain_name").value
self.wallet: Web3Wallet = Web3Wallet(
private_key=private_key,
backend_urls=[ethereum_rpc_url],
erc20_token_addresses=erc20_token_addresses,
chain=getattr(EthereumChain, chain_name),
)
def _initialize_markets(self, market_names: List[Tuple[str, List[str]]]):
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
# aggregate trading_pairs if there are duplicate markets
market_trading_pairs_map = {}
for market_name, trading_pairs in market_names:
if market_name not in market_trading_pairs_map:
market_trading_pairs_map[market_name] = []
market_class: MarketBase = MARKET_CLASSES.get(market_name, MarketBase)
for trading_pair in trading_pairs:
exchange_trading_pair: str = market_class.convert_to_exchange_trading_pair(trading_pair)
market_trading_pairs_map[market_name].append(exchange_trading_pair)
for market_name, trading_pairs in market_trading_pairs_map.items():
if global_config_map.get("paper_trade_enabled").value:
try:
market = create_paper_trade_market(market_name, trading_pairs)
except Exception:
raise
paper_trade_account_balance = global_config_map.get("paper_trade_account_balance").value
for asset, balance in paper_trade_account_balance:
market.set_balance(asset, balance)
elif market_name == "binance":
binance_api_key = global_config_map.get("binance_api_key").value
binance_api_secret = global_config_map.get("binance_api_secret").value
market = BinanceMarket(
binance_api_key,
binance_api_secret,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required,
)
elif market_name == "radar_relay":
assert self.wallet is not None
market = RadarRelayMarket(
wallet=self.wallet,
ethereum_rpc_url=ethereum_rpc_url,
trading_pairs=trading_pairs,
trading_required=self._trading_required,
)
elif market_name == "bamboo_relay":
assert self.wallet is not None
use_coordinator = global_config_map.get("bamboo_relay_use_coordinator").value
pre_emptive_soft_cancels = global_config_map.get("bamboo_relay_pre_emptive_soft_cancels").value
market = BambooRelayMarket(
wallet=self.wallet,
ethereum_rpc_url=ethereum_rpc_url,
trading_pairs=trading_pairs,
use_coordinator=use_coordinator,
pre_emptive_soft_cancels=pre_emptive_soft_cancels,
trading_required=self._trading_required,
)
elif market_name == "coinbase_pro":
coinbase_pro_api_key = global_config_map.get("coinbase_pro_api_key").value
coinbase_pro_secret_key = global_config_map.get("coinbase_pro_secret_key").value
coinbase_pro_passphrase = global_config_map.get("coinbase_pro_passphrase").value
market = CoinbaseProMarket(coinbase_pro_api_key,
coinbase_pro_secret_key,
coinbase_pro_passphrase,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "huobi":
huobi_api_key = global_config_map.get("huobi_api_key").value
huobi_secret_key = global_config_map.get("huobi_secret_key").value
market = HuobiMarket(huobi_api_key,
huobi_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "liquid":
liquid_api_key = global_config_map.get("liquid_api_key").value
liquid_secret_key = global_config_map.get("liquid_secret_key").value
market = LiquidMarket(liquid_api_key,
liquid_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
user_stream_tracker_data_source_type=UserStreamTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "dolomite":
assert self.wallet is not None
is_test_net: bool = global_config_map.get("ethereum_chain_name").value == "DOLOMITE_TEST"
market = DolomiteMarket(
wallet=self.wallet,
ethereum_rpc_url=ethereum_rpc_url,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
isTestNet=is_test_net,
trading_required=self._trading_required,
)
elif market_name == "loopring":
                loopring_accountid: int = global_config_map.get("loopring_accountid").value
                loopring_exchangeid: int = global_config_map.get("loopring_exchangeid").value
                loopring_private_key: str = global_config_map.get("loopring_private_key").value
                loopring_api_key: str = global_config_map.get("loopring_api_key").value
market = LoopringMarket(
loopring_accountid=loopring_accountid,
loopring_exchangeid=loopring_exchangeid,
loopring_private_key=loopring_private_key,
loopring_api_key=loopring_api_key,
trading_pairs=trading_pairs,
trading_required=self._trading_required,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API
)
elif market_name == "bittrex":
bittrex_api_key = global_config_map.get("bittrex_api_key").value
bittrex_secret_key = global_config_map.get("bittrex_secret_key").value
market = BittrexMarket(bittrex_api_key,
bittrex_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "kucoin":
kucoin_api_key = global_config_map.get("kucoin_api_key").value
kucoin_secret_key = global_config_map.get("kucoin_secret_key").value
kucoin_passphrase = global_config_map.get("kucoin_passphrase").value
market = KucoinMarket(kucoin_api_key,
kucoin_passphrase,
kucoin_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "bitcoin_com":
bitcoin_com_api_key = global_config_map.get("bitcoin_com_api_key").value
bitcoin_com_secret_key = global_config_map.get("bitcoin_com_secret_key").value
market = BitcoinComMarket(bitcoin_com_api_key,
bitcoin_com_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "kraken":
kraken_api_key = global_config_map.get("kraken_api_key").value
kraken_secret_key = global_config_map.get("kraken_secret_key").value
market = KrakenMarket(kraken_api_key,
kraken_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
else:
raise ValueError(f"Market name {market_name} is invalid.")
self.markets[market_name]: MarketBase = market
self.markets_recorder = MarketsRecorder(
self.trade_fill_db,
list(self.markets.values()),
self.strategy_file_name,
self.strategy_name,
)
self.markets_recorder.start()
def _initialize_notifiers(self):
if global_config_map.get("telegram_enabled").value:
# TODO: refactor to use single instance
if not any([isinstance(n, TelegramNotifier) for n in self.notifiers]):
self.notifiers.append(
TelegramNotifier(
token=global_config_map["telegram_token"].value,
chat_id=global_config_map["telegram_chat_id"].value,
hb=self,
)
)
for notifier in self.notifiers:
notifier.start()
| 50.603053
| 154
| 0.654196
|
import asyncio
from collections import deque
import logging
import time
from typing import List, Dict, Optional, Tuple, Set, Deque
from hummingbot.client.command import __all__ as commands
from hummingbot.core.clock import Clock
from hummingbot.core.data_type.order_book_tracker import OrderBookTrackerDataSourceType
from hummingbot.core.data_type.user_stream_tracker import UserStreamTrackerDataSourceType
from hummingbot.logger import HummingbotLogger
from hummingbot.logger.application_warning import ApplicationWarning
from hummingbot.market.binance.binance_market import BinanceMarket
from hummingbot.market.bittrex.bittrex_market import BittrexMarket
from hummingbot.market.kucoin.kucoin_market import KucoinMarket
from hummingbot.market.coinbase_pro.coinbase_pro_market import CoinbaseProMarket
from hummingbot.market.huobi.huobi_market import HuobiMarket
from hummingbot.market.liquid.liquid_market import LiquidMarket
from hummingbot.market.market_base import MarketBase
from hummingbot.market.paper_trade import create_paper_trade_market
from hummingbot.market.radar_relay.radar_relay_market import RadarRelayMarket
from hummingbot.market.bamboo_relay.bamboo_relay_market import BambooRelayMarket
from hummingbot.market.dolomite.dolomite_market import DolomiteMarket
from hummingbot.market.loopring.loopring_market import LoopringMarket
from hummingbot.market.bitcoin_com.bitcoin_com_market import BitcoinComMarket
from hummingbot.market.kraken.kraken_market import KrakenMarket
from hummingbot.model.sql_connection_manager import SQLConnectionManager
from hummingbot.wallet.ethereum.ethereum_chain import EthereumChain
from hummingbot.wallet.ethereum.web3_wallet import Web3Wallet
from hummingbot.client.ui.keybindings import load_key_bindings
from hummingbot.client.ui.parser import load_parser, ThrowingArgumentParser
from hummingbot.client.ui.hummingbot_cli import HummingbotCLI
from hummingbot.client.ui.completer import load_completer
from hummingbot.client.errors import InvalidCommandError, ArgumentParserError
from hummingbot.client.config.global_config_map import global_config_map, using_wallet
from hummingbot.client.config.config_helpers import get_erc20_token_addresses, get_strategy_config_map
from hummingbot.strategy.strategy_base import StrategyBase
from hummingbot.strategy.cross_exchange_market_making import CrossExchangeMarketPair
from hummingbot.core.utils.kill_switch import KillSwitch
from hummingbot.data_feed.data_feed_base import DataFeedBase
from hummingbot.notifier.notifier_base import NotifierBase
from hummingbot.notifier.telegram_notifier import TelegramNotifier
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.market.markets_recorder import MarketsRecorder
from hummingbot.client.config.security import Security
s_logger = None
MARKET_CLASSES = {
"bamboo_relay": BambooRelayMarket,
"binance": BinanceMarket,
"coinbase_pro": CoinbaseProMarket,
"huobi": HuobiMarket,
"liquid": LiquidMarket,
"radar_relay": RadarRelayMarket,
"dolomite": DolomiteMarket,
"loopring": LoopringMarket,
"bittrex": BittrexMarket,
"kucoin": KucoinMarket,
"bitcoin_com": BitcoinComMarket,
"kraken": KrakenMarket,
}
class HummingbotApplication(*commands):
KILL_TIMEOUT = 10.0
APP_WARNING_EXPIRY_DURATION = 3600.0
APP_WARNING_STATUS_LIMIT = 6
_main_app: Optional["HummingbotApplication"] = None
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
@classmethod
def main_application(cls) -> "HummingbotApplication":
if cls._main_app is None:
cls._main_app = HummingbotApplication()
return cls._main_app
def __init__(self):
self.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
self.parser: ThrowingArgumentParser = load_parser(self)
self.app = HummingbotCLI(
input_handler=self._handle_command, bindings=load_key_bindings(self), completer=load_completer(self)
)
self.markets: Dict[str, MarketBase] = {}
self.wallet: Optional[Web3Wallet] = None
self.strategy_file_name: str = None
self.strategy_name: str = None
self.strategy_task: Optional[asyncio.Task] = None
self.strategy: Optional[StrategyBase] = None
self.market_pair: Optional[CrossExchangeMarketPair] = None
self.market_trading_pair_tuples: List[MarketTradingPairTuple] = []
self.clock: Optional[Clock] = None
self.init_time: int = int(time.time() * 1e3)
self.start_time: Optional[int] = None
self.assets: Optional[Set[str]] = set()
self.starting_balances = {}
self.placeholder_mode = False
self.log_queue_listener: Optional[logging.handlers.QueueListener] = None
self.data_feed: Optional[DataFeedBase] = None
self.notifiers: List[NotifierBase] = []
self.kill_switch: Optional[KillSwitch] = None
self._app_warnings: Deque[ApplicationWarning] = deque()
self._trading_required: bool = True
self.trade_fill_db: SQLConnectionManager = SQLConnectionManager.get_trade_fills_instance()
self.markets_recorder: Optional[MarketsRecorder] = None
@property
def strategy_config_map(self):
if self.strategy_name is not None:
return get_strategy_config_map(self.strategy_name)
return None
def _notify(self, msg: str):
self.app.log(msg)
for notifier in self.notifiers:
notifier.add_msg_to_queue(msg)
def _handle_command(self, raw_command: str):
raw_command = raw_command.lower().strip()
try:
if self.placeholder_mode:
pass
else:
args = self.parser.parse_args(args=raw_command.split())
kwargs = vars(args)
if not hasattr(args, "func"):
return
f = args.func
del kwargs["func"]
f(**kwargs)
except InvalidCommandError as e:
self._notify("Invalid command: %s" % (str(e),))
except ArgumentParserError as e:
self._notify(str(e))
except NotImplementedError:
self._notify("Command not yet implemented. This feature is currently under development.")
except Exception as e:
self.logger().error(e, exc_info=True)
async def _cancel_outstanding_orders(self) -> bool:
success = True
try:
on_chain_cancel_on_exit = global_config_map.get("on_chain_cancel_on_exit").value
bamboo_relay_use_coordinator = global_config_map.get("bamboo_relay_use_coordinator").value
kill_timeout: float = self.KILL_TIMEOUT
self._notify("Cancelling outstanding orders...")
for market_name, market in self.markets.items():
if not on_chain_cancel_on_exit and (market_name == "radar_relay" or (market_name == "bamboo_relay" and not bamboo_relay_use_coordinator)):
continue
cancellation_results = await market.cancel_all(kill_timeout)
uncancelled = list(filter(lambda cr: cr.success is False, cancellation_results))
if len(uncancelled) > 0:
success = False
uncancelled_order_ids = list(map(lambda cr: cr.order_id, uncancelled))
self._notify("\nFailed to cancel the following orders on %s:\n%s" % (
market_name,
'\n'.join(uncancelled_order_ids)
))
except Exception:
self.logger().error(f"Error canceling outstanding orders.", exc_info=True)
success = False
if success:
self._notify("All outstanding orders cancelled.")
return success
async def run(self):
await self.app.run()
def add_application_warning(self, app_warning: ApplicationWarning):
self._expire_old_application_warnings()
self._app_warnings.append(app_warning)
def clear_application_warning(self):
self._app_warnings.clear()
@staticmethod
def _initialize_market_assets(market_name: str, trading_pairs: List[str]) -> List[Tuple[str, str]]:
market_class: MarketBase = MARKET_CLASSES.get(market_name, MarketBase)
market_trading_pairs: List[Tuple[str, str]] = [market_class.split_trading_pair(trading_pair) for trading_pair in trading_pairs]
return market_trading_pairs
@staticmethod
def _convert_to_exchange_trading_pair(market_name: str, hb_trading_pair: List[str]) -> List[str]:
market_class: MarketBase = MARKET_CLASSES.get(market_name, MarketBase)
return [market_class.convert_to_exchange_trading_pair(trading_pair) for trading_pair in hb_trading_pair]
def _initialize_wallet(self, token_trading_pairs: List[str]):
if not using_wallet():
return
ethereum_wallet = global_config_map.get("ethereum_wallet").value
private_key = Security._private_keys[ethereum_wallet]
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
erc20_token_addresses = get_erc20_token_addresses(token_trading_pairs)
chain_name: str = global_config_map.get("ethereum_chain_name").value
self.wallet: Web3Wallet = Web3Wallet(
private_key=private_key,
backend_urls=[ethereum_rpc_url],
erc20_token_addresses=erc20_token_addresses,
chain=getattr(EthereumChain, chain_name),
)
def _initialize_markets(self, market_names: List[Tuple[str, List[str]]]):
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
market_trading_pairs_map = {}
for market_name, trading_pairs in market_names:
if market_name not in market_trading_pairs_map:
market_trading_pairs_map[market_name] = []
market_class: MarketBase = MARKET_CLASSES.get(market_name, MarketBase)
for trading_pair in trading_pairs:
exchange_trading_pair: str = market_class.convert_to_exchange_trading_pair(trading_pair)
market_trading_pairs_map[market_name].append(exchange_trading_pair)
for market_name, trading_pairs in market_trading_pairs_map.items():
if global_config_map.get("paper_trade_enabled").value:
try:
market = create_paper_trade_market(market_name, trading_pairs)
except Exception:
raise
paper_trade_account_balance = global_config_map.get("paper_trade_account_balance").value
for asset, balance in paper_trade_account_balance:
market.set_balance(asset, balance)
elif market_name == "binance":
binance_api_key = global_config_map.get("binance_api_key").value
binance_api_secret = global_config_map.get("binance_api_secret").value
market = BinanceMarket(
binance_api_key,
binance_api_secret,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required,
)
elif market_name == "radar_relay":
assert self.wallet is not None
market = RadarRelayMarket(
wallet=self.wallet,
ethereum_rpc_url=ethereum_rpc_url,
trading_pairs=trading_pairs,
trading_required=self._trading_required,
)
elif market_name == "bamboo_relay":
assert self.wallet is not None
use_coordinator = global_config_map.get("bamboo_relay_use_coordinator").value
pre_emptive_soft_cancels = global_config_map.get("bamboo_relay_pre_emptive_soft_cancels").value
market = BambooRelayMarket(
wallet=self.wallet,
ethereum_rpc_url=ethereum_rpc_url,
trading_pairs=trading_pairs,
use_coordinator=use_coordinator,
pre_emptive_soft_cancels=pre_emptive_soft_cancels,
trading_required=self._trading_required,
)
elif market_name == "coinbase_pro":
coinbase_pro_api_key = global_config_map.get("coinbase_pro_api_key").value
coinbase_pro_secret_key = global_config_map.get("coinbase_pro_secret_key").value
coinbase_pro_passphrase = global_config_map.get("coinbase_pro_passphrase").value
market = CoinbaseProMarket(coinbase_pro_api_key,
coinbase_pro_secret_key,
coinbase_pro_passphrase,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "huobi":
huobi_api_key = global_config_map.get("huobi_api_key").value
huobi_secret_key = global_config_map.get("huobi_secret_key").value
market = HuobiMarket(huobi_api_key,
huobi_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "liquid":
liquid_api_key = global_config_map.get("liquid_api_key").value
liquid_secret_key = global_config_map.get("liquid_secret_key").value
market = LiquidMarket(liquid_api_key,
liquid_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
user_stream_tracker_data_source_type=UserStreamTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "dolomite":
assert self.wallet is not None
is_test_net: bool = global_config_map.get("ethereum_chain_name").value == "DOLOMITE_TEST"
market = DolomiteMarket(
wallet=self.wallet,
ethereum_rpc_url=ethereum_rpc_url,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
isTestNet=is_test_net,
trading_required=self._trading_required,
)
elif market_name == "loopring":
                loopring_accountid: int = global_config_map.get("loopring_accountid").value
                loopring_exchangeid: int = global_config_map.get("loopring_exchangeid").value
                loopring_private_key: str = global_config_map.get("loopring_private_key").value
                loopring_api_key: str = global_config_map.get("loopring_api_key").value
market = LoopringMarket(
loopring_accountid=loopring_accountid,
loopring_exchangeid=loopring_exchangeid,
loopring_private_key=loopring_private_key,
loopring_api_key=loopring_api_key,
trading_pairs=trading_pairs,
trading_required=self._trading_required,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API
)
elif market_name == "bittrex":
bittrex_api_key = global_config_map.get("bittrex_api_key").value
bittrex_secret_key = global_config_map.get("bittrex_secret_key").value
market = BittrexMarket(bittrex_api_key,
bittrex_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "kucoin":
kucoin_api_key = global_config_map.get("kucoin_api_key").value
kucoin_secret_key = global_config_map.get("kucoin_secret_key").value
kucoin_passphrase = global_config_map.get("kucoin_passphrase").value
market = KucoinMarket(kucoin_api_key,
kucoin_passphrase,
kucoin_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "bitcoin_com":
bitcoin_com_api_key = global_config_map.get("bitcoin_com_api_key").value
bitcoin_com_secret_key = global_config_map.get("bitcoin_com_secret_key").value
market = BitcoinComMarket(bitcoin_com_api_key,
bitcoin_com_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
elif market_name == "kraken":
kraken_api_key = global_config_map.get("kraken_api_key").value
kraken_secret_key = global_config_map.get("kraken_secret_key").value
market = KrakenMarket(kraken_api_key,
kraken_secret_key,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs=trading_pairs,
trading_required=self._trading_required)
else:
raise ValueError(f"Market name {market_name} is invalid.")
self.markets[market_name]: MarketBase = market
self.markets_recorder = MarketsRecorder(
self.trade_fill_db,
list(self.markets.values()),
self.strategy_file_name,
self.strategy_name,
)
self.markets_recorder.start()
def _initialize_notifiers(self):
if global_config_map.get("telegram_enabled").value:
if not any([isinstance(n, TelegramNotifier) for n in self.notifiers]):
self.notifiers.append(
TelegramNotifier(
token=global_config_map["telegram_token"].value,
chat_id=global_config_map["telegram_chat_id"].value,
hb=self,
)
)
for notifier in self.notifiers:
notifier.start()
| true
| true
|
f7057f4bf2418d7e4f54b2e0e6f937362a8f09bc
| 4,381
|
py
|
Python
|
faassupervisor/faas/binary/supervisor.py
|
grycap/faas-supervisor
|
f5dcb6a16cadec53235c13278942567947c7b443
|
[
"Apache-2.0"
] | 7
|
2019-03-14T15:18:54.000Z
|
2022-01-13T07:37:18.000Z
|
faassupervisor/faas/binary/supervisor.py
|
grycap/faas-supervisor
|
f5dcb6a16cadec53235c13278942567947c7b443
|
[
"Apache-2.0"
] | 2
|
2019-10-14T09:50:57.000Z
|
2020-01-08T11:25:54.000Z
|
faassupervisor/faas/binary/supervisor.py
|
grycap/faas-supervisor
|
f5dcb6a16cadec53235c13278942567947c7b443
|
[
"Apache-2.0"
] | 8
|
2019-04-02T16:48:46.000Z
|
2022-01-28T13:45:49.000Z
|
# Copyright (C) GRyCAP - I3M - UPV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with all the classes and methods
related with the binary supervisor."""
import subprocess
import sys
import uuid
from faassupervisor.faas import DefaultSupervisor
from faassupervisor.logger import get_logger
from faassupervisor.utils import SysUtils, FileUtils, StrUtils
class BinarySupervisor(DefaultSupervisor):
"""Supervisor class used in the Binary environment."""
_SCRIPT_FILE_NAME = 'script.sh'
_OSCAR_SCRIPT_PATH = '/oscar/config/script.sh'
def __init__(self, event_type):
self.output = ''
self.event_type = event_type
get_logger().info('SUPERVISOR: Initializing Binary supervisor')
def _get_script_path(self):
script_path = None
if SysUtils.is_var_in_env('SCRIPT'):
script_path = SysUtils.join_paths(SysUtils.get_env_var("TMP_INPUT_DIR"),
self._SCRIPT_FILE_NAME)
script_content = StrUtils.base64_to_str(SysUtils.get_env_var('SCRIPT'))
FileUtils.create_file_with_content(script_path, script_content)
get_logger().info("Script file created in '%s'", script_path)
elif FileUtils.is_file(self._OSCAR_SCRIPT_PATH):
script_path = self._OSCAR_SCRIPT_PATH
get_logger().info("Script file found in '%s'", script_path)
return script_path
def execute_function(self):
script_path = self._get_script_path()
if script_path:
try:
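                # PyInstaller's bootloader overrides LD_LIBRARY_PATH for the bundled interpreter.
                # Restore the value saved in LD_LIBRARY_PATH_ORIG (or drop the variable entirely)
                # so the user script runs against the system libraries, then put the PyInstaller
                # value back once the child process has been spawned.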
pyinstaller_library_path = SysUtils.get_env_var('LD_LIBRARY_PATH')
orig_library_path = SysUtils.get_env_var('LD_LIBRARY_PATH_ORIG')
if orig_library_path:
SysUtils.set_env_var('LD_LIBRARY_PATH', orig_library_path)
else:
SysUtils.delete_env_var('LD_LIBRARY_PATH')
proc = subprocess.Popen(['/bin/sh', script_path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding='utf-8',
errors='ignore')
SysUtils.set_env_var('LD_LIBRARY_PATH', pyinstaller_library_path)
get_logger().debug("CONTAINER OUTPUT:\n %s", self.output)
for line in proc.stdout:
get_logger().debug(line.strip())
self.output = self.output + line
except subprocess.CalledProcessError as cpe:
# Exit with user script return code if an
# error occurs (Kubernetes handles the error)
get_logger().error(cpe.output.decode(encoding='utf-8', errors='ignore'))
sys.exit(cpe.returncode)
else:
get_logger().error('No user script found!')
def create_response(self):
if self.event_type and self.event_type == 'UNKNOWN':
# Check if there are files in $TMP_OUTPUT_DIR
output_dir = SysUtils.get_env_var('TMP_OUTPUT_DIR')
files = FileUtils.get_all_files_in_dir(output_dir)
if len(files) == 1:
# Return the file encoded in base64
file_content = FileUtils.read_file(files[0], 'rb')
return StrUtils.bytes_to_base64str(file_content)
if len(files) > 1:
# Generate a zip with all files and return it encoded in base64
zip_path = SysUtils.join_paths(output_dir, str(uuid.uuid4()))
FileUtils.zip_file_list(files, zip_path)
file_content = FileUtils.read_file(zip_path, 'rb')
return StrUtils.bytes_to_base64str(file_content)
return self.output
def create_error_response(self):
pass
| 45.635417
| 88
| 0.624515
|
import subprocess
import sys
import uuid
from faassupervisor.faas import DefaultSupervisor
from faassupervisor.logger import get_logger
from faassupervisor.utils import SysUtils, FileUtils, StrUtils
class BinarySupervisor(DefaultSupervisor):
_SCRIPT_FILE_NAME = 'script.sh'
_OSCAR_SCRIPT_PATH = '/oscar/config/script.sh'
def __init__(self, event_type):
self.output = ''
self.event_type = event_type
get_logger().info('SUPERVISOR: Initializing Binary supervisor')
def _get_script_path(self):
script_path = None
if SysUtils.is_var_in_env('SCRIPT'):
script_path = SysUtils.join_paths(SysUtils.get_env_var("TMP_INPUT_DIR"),
self._SCRIPT_FILE_NAME)
script_content = StrUtils.base64_to_str(SysUtils.get_env_var('SCRIPT'))
FileUtils.create_file_with_content(script_path, script_content)
get_logger().info("Script file created in '%s'", script_path)
elif FileUtils.is_file(self._OSCAR_SCRIPT_PATH):
script_path = self._OSCAR_SCRIPT_PATH
get_logger().info("Script file found in '%s'", script_path)
return script_path
def execute_function(self):
script_path = self._get_script_path()
if script_path:
try:
pyinstaller_library_path = SysUtils.get_env_var('LD_LIBRARY_PATH')
orig_library_path = SysUtils.get_env_var('LD_LIBRARY_PATH_ORIG')
if orig_library_path:
SysUtils.set_env_var('LD_LIBRARY_PATH', orig_library_path)
else:
SysUtils.delete_env_var('LD_LIBRARY_PATH')
proc = subprocess.Popen(['/bin/sh', script_path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding='utf-8',
errors='ignore')
SysUtils.set_env_var('LD_LIBRARY_PATH', pyinstaller_library_path)
get_logger().debug("CONTAINER OUTPUT:\n %s", self.output)
for line in proc.stdout:
get_logger().debug(line.strip())
self.output = self.output + line
except subprocess.CalledProcessError as cpe:
get_logger().error(cpe.output.decode(encoding='utf-8', errors='ignore'))
sys.exit(cpe.returncode)
else:
get_logger().error('No user script found!')
def create_response(self):
if self.event_type and self.event_type == 'UNKNOWN':
output_dir = SysUtils.get_env_var('TMP_OUTPUT_DIR')
files = FileUtils.get_all_files_in_dir(output_dir)
if len(files) == 1:
file_content = FileUtils.read_file(files[0], 'rb')
return StrUtils.bytes_to_base64str(file_content)
if len(files) > 1:
zip_path = SysUtils.join_paths(output_dir, str(uuid.uuid4()))
FileUtils.zip_file_list(files, zip_path)
file_content = FileUtils.read_file(zip_path, 'rb')
return StrUtils.bytes_to_base64str(file_content)
return self.output
def create_error_response(self):
pass
| true
| true
|
f7057fd5afafd3865f8482c22c4d763876ca6411
| 8,749
|
py
|
Python
|
src/main.py
|
Alamgir-K/Climate-Change-Simulation
|
2928ad1522d0371885dbd174ef14e5795e6282d2
|
[
"MIT"
] | null | null | null |
src/main.py
|
Alamgir-K/Climate-Change-Simulation
|
2928ad1522d0371885dbd174ef14e5795e6282d2
|
[
"MIT"
] | null | null | null |
src/main.py
|
Alamgir-K/Climate-Change-Simulation
|
2928ad1522d0371885dbd174ef14e5795e6282d2
|
[
"MIT"
] | null | null | null |
"""
Climate Change Project
"""
import plotly.graph_objects as go
from PIL import Image, ImageDraw, ImageFont
from computing_data import calc_high_actual_pd, \
calc_low_actual_pd, \
calc_median_actual_pd, \
make_high_rcp_list, make_low_rcp_list, \
make_median_rcp_list, rcp_to_slice, temp_to_rgb
from reading_data import read_actual_data, read_predicted_data, CITY_SET, MAP, CITY_TEMPS
def plot_temp_data(actual_temps_dict: dict, final_low_rcp_list: list, final_median_rcp_list: list,
final_high_rcp_list: list) -> None:
"""Plot a line and scatter graph of real and predicted temperatures
using plotly's line and scatter plots
"""
x = list(actual_temps_dict.keys())
actual_y = list(actual_temps_dict.values())
low_predicted_y = final_low_rcp_list
median_predicted_y = final_median_rcp_list
high_predicted_y = final_high_rcp_list
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=low_predicted_y,
mode='lines+markers',
name='RCP 2.6 Predicted Temperature'))
fig.add_trace(go.Scatter(x=x, y=median_predicted_y,
mode='lines+markers',
name='RCP 4.5 Predicted Temperature'))
fig.add_trace(go.Scatter(x=x, y=high_predicted_y,
mode='lines+markers',
name='RCP 8.5 Predicted Temperature'))
fig.add_trace(go.Scatter(x=x, y=actual_y,
mode='lines+markers',
name='Actual Temperature'))
fig.update_layout(
title="Actual vs Predicted Temperature of " + city[3],
xaxis_title="Years",
yaxis_title="Temperature (Celsius)",
font=dict(
family="Courier New, monospace",
size=18)
)
fig.show()
def draw_table(actual_temps_dict: dict,
final_low_rcp_list: list,
final_median_rcp_list: list,
final_high_rcp_list: list,
low_rcp_percentage_difference: list,
median_rcp_percentage_difference: list,
high_rcp_percentage_difference: list) -> None:
"""
Draw a table using a plotly's basic table
"""
fig = go.Figure(data=[go.Table(header=dict(values=['Actual Temperature', 'RCP 2.6',
'% Difference of RCP 2.6 and Actual Temp',
'RCP 4.5',
'% Difference of RCP 4.5 and Actual Temp',
'RCP 8.5',
'% Difference of RCP 8.5 and Actual Temp'],
line_color='darkslategray',
fill_color='lightskyblue'),
cells=dict(values=[list(actual_temps_dict.values()),
final_low_rcp_list,
low_rcp_percentage_difference,
final_median_rcp_list,
median_rcp_percentage_difference,
final_high_rcp_list,
high_rcp_percentage_difference]))])
fig.update_layout(
title="Actual vs Predicted Temperature of " + city[3]
)
fig.show()
def draw_map(rcp_type: str) -> None:
"""
Draws both maps for predicted and actual temperature of the cities in Canada
"""
map = Image.open(MAP)
width, height = map.size
new_map = Image.new('RGB', (width * 2, height + 80))
# fills the cities for the actual map
for city in CITY_SET:
temp = CITY_TEMPS[city][0]
ImageDraw.floodfill(map, city[2], temp_to_rgb(temp), thresh=50)
map2 = Image.open(MAP)
# fills the cities for the predicted map
for city in CITY_SET:
temp = CITY_TEMPS[city][rcp_to_slice(rcp_type)]
ImageDraw.floodfill(map2, city[2], temp_to_rgb(temp), thresh=50)
new_map.paste(map, (0, 80))
new_map.paste(map2, (width, 80))
# Writes the titles
title_font = ImageFont.truetype("arial.ttf", 50)
new_map_editable = ImageDraw.Draw(new_map)
new_map_editable.text((width // 3, 10),
'Actual Temperatures(' + year + ')', font=title_font)
new_map_editable.text((int(1.3 * width), 10),
'Predicted Temperatures(' + year + ')', font=title_font)
new_map.show()
def run(city: tuple, year: int, city_name: str) -> None:
"""
Runs the code for one city
"""
actual_temps_dict = read_actual_data(city[0])
predicted_temps_dict = read_predicted_data(city[1], actual_temps_dict)
if city[3].lower() == city_name.lower():
final_low_rcp_list = make_low_rcp_list(predicted_temps_dict)
low_rcp_percentage_difference = \
calc_low_actual_pd(actual_temps_dict, final_low_rcp_list)
final_median_rcp_list = make_median_rcp_list(predicted_temps_dict)
median_rcp_percentage_difference = \
calc_median_actual_pd(actual_temps_dict,
final_median_rcp_list)
final_high_rcp_list = make_high_rcp_list(predicted_temps_dict)
high_rcp_percentage_difference = \
calc_high_actual_pd(actual_temps_dict,
final_high_rcp_list)
plot_temp_data(actual_temps_dict, final_low_rcp_list,
final_median_rcp_list, final_high_rcp_list)
draw_table(actual_temps_dict, final_low_rcp_list, final_median_rcp_list,
final_high_rcp_list,
low_rcp_percentage_difference, median_rcp_percentage_difference,
high_rcp_percentage_difference)
temperatures = [actual_temps_dict[year], predicted_temps_dict[year]['RCP 2.6'],
predicted_temps_dict[year]['RCP 4.5'], predicted_temps_dict[year]['RCP 8.5']]
CITY_TEMPS[city] = temperatures
# this is the main part of the program that calls every function
if __name__ == '__main__':
year = input('Write the year for the map to display data from '
'(in range of 2003-2019 inclusive)')
if not 2003 <= int(year) <= 2019:
year = input('Try again. Write the number between 2003 and 2019 inclusive')
city_name = input(
'Type the name of the city you want to display its stats on graph'
'(TORONTO, QUEBEC, HALIFAX, WINNIPEG)')
if city_name.lower() not in ('toronto', 'halifax', 'quebec', 'winnipeg'):
city_name = input(
'Try again. Type Toronto or Quebec or Halifax or Winnipeg')
rcp_type = input(
'Write an RCP value for the map to display on the "predicted" side.'
'(write RCP 2.6 or RCP 4.5 or RCP 8.5)')
if rcp_type not in ('RCP 2.6', 'RCP 4.5', 'RCP 8.5'):
        rcp_type = input('Try again. Write RCP 2.6 or RCP 4.5 or RCP 8.5')
while True:
for city in CITY_SET:
run(city, int(year), city_name)
draw_map(rcp_type)
year = input('Write the year for the map to display data from '
'(in range of 2003-2019 inclusive). '
'Type 2 wrong answers to exit')
if not 2003 <= int(year) <= 2019:
year = input('Try again. Write the number between 2003 and 2019 inclusive. '
'Type a wrong answer to exit')
if not 2003 <= int(year) <= 2019:
break
city_name = input(
'Type the name of the city you want to display its stats on graph'
'(TORONTO, QUEBEC, HALIFAX, WINNIPEG) Type 2 wrong answers to exit.')
if city_name.lower() not in ('toronto', 'halifax', 'quebec', 'winnipeg'):
city_name = input(
'Try again. Type Toronto or Quebec or Halifax or Winnipeg. '
'Type a wrong answer to exit.')
if city_name.lower() not in ('toronto', 'halifax', 'quebec', 'winnipeg'):
break
rcp_type = input(
'Write an RCP value for the map to display on the "predicted" side.'
'(write RCP 2.6 or RCP 4.5 or RCP 8.5) Type 2 wrong answers to exit')
if rcp_type not in ('RCP 2.6', 'RCP 4.5', 'RCP 8.5'):
            rcp_type = input('Try again. Write RCP 2.6 or RCP 4.5 or RCP 8.5. '
'Type a wrong answer to exit.')
if rcp_type not in ('RCP 2.6', 'RCP 4.5', 'RCP 8.5'):
break
| 42.470874
| 98
| 0.572865
|
import plotly.graph_objects as go
from PIL import Image, ImageDraw, ImageFont
from computing_data import calc_high_actual_pd, \
calc_low_actual_pd, \
calc_median_actual_pd, \
make_high_rcp_list, make_low_rcp_list, \
make_median_rcp_list, rcp_to_slice, temp_to_rgb
from reading_data import read_actual_data, read_predicted_data, CITY_SET, MAP, CITY_TEMPS
def plot_temp_data(actual_temps_dict: dict, final_low_rcp_list: list, final_median_rcp_list: list,
final_high_rcp_list: list) -> None:
x = list(actual_temps_dict.keys())
actual_y = list(actual_temps_dict.values())
low_predicted_y = final_low_rcp_list
median_predicted_y = final_median_rcp_list
high_predicted_y = final_high_rcp_list
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=low_predicted_y,
mode='lines+markers',
name='RCP 2.6 Predicted Temperature'))
fig.add_trace(go.Scatter(x=x, y=median_predicted_y,
mode='lines+markers',
name='RCP 4.5 Predicted Temperature'))
fig.add_trace(go.Scatter(x=x, y=high_predicted_y,
mode='lines+markers',
name='RCP 8.5 Predicted Temperature'))
fig.add_trace(go.Scatter(x=x, y=actual_y,
mode='lines+markers',
name='Actual Temperature'))
fig.update_layout(
title="Actual vs Predicted Temperature of " + city[3],
xaxis_title="Years",
yaxis_title="Temperature (Celsius)",
font=dict(
family="Courier New, monospace",
size=18)
)
fig.show()
def draw_table(actual_temps_dict: dict,
final_low_rcp_list: list,
final_median_rcp_list: list,
final_high_rcp_list: list,
low_rcp_percentage_difference: list,
median_rcp_percentage_difference: list,
high_rcp_percentage_difference: list) -> None:
fig = go.Figure(data=[go.Table(header=dict(values=['Actual Temperature', 'RCP 2.6',
'% Difference of RCP 2.6 and Actual Temp',
'RCP 4.5',
'% Difference of RCP 4.5 and Actual Temp',
'RCP 8.5',
'% Difference of RCP 8.5 and Actual Temp'],
line_color='darkslategray',
fill_color='lightskyblue'),
cells=dict(values=[list(actual_temps_dict.values()),
final_low_rcp_list,
low_rcp_percentage_difference,
final_median_rcp_list,
median_rcp_percentage_difference,
final_high_rcp_list,
high_rcp_percentage_difference]))])
fig.update_layout(
title="Actual vs Predicted Temperature of " + city[3]
)
fig.show()
def draw_map(rcp_type: str) -> None:
map = Image.open(MAP)
width, height = map.size
new_map = Image.new('RGB', (width * 2, height + 80))
for city in CITY_SET:
temp = CITY_TEMPS[city][0]
ImageDraw.floodfill(map, city[2], temp_to_rgb(temp), thresh=50)
map2 = Image.open(MAP)
for city in CITY_SET:
temp = CITY_TEMPS[city][rcp_to_slice(rcp_type)]
ImageDraw.floodfill(map2, city[2], temp_to_rgb(temp), thresh=50)
new_map.paste(map, (0, 80))
new_map.paste(map2, (width, 80))
title_font = ImageFont.truetype("arial.ttf", 50)
new_map_editable = ImageDraw.Draw(new_map)
new_map_editable.text((width // 3, 10),
'Actual Temperatures(' + year + ')', font=title_font)
new_map_editable.text((int(1.3 * width), 10),
'Predicted Temperatures(' + year + ')', font=title_font)
new_map.show()
def run(city: tuple, year: int, city_name: str) -> None:
actual_temps_dict = read_actual_data(city[0])
predicted_temps_dict = read_predicted_data(city[1], actual_temps_dict)
if city[3].lower() == city_name.lower():
final_low_rcp_list = make_low_rcp_list(predicted_temps_dict)
low_rcp_percentage_difference = \
calc_low_actual_pd(actual_temps_dict, final_low_rcp_list)
final_median_rcp_list = make_median_rcp_list(predicted_temps_dict)
median_rcp_percentage_difference = \
calc_median_actual_pd(actual_temps_dict,
final_median_rcp_list)
final_high_rcp_list = make_high_rcp_list(predicted_temps_dict)
high_rcp_percentage_difference = \
calc_high_actual_pd(actual_temps_dict,
final_high_rcp_list)
plot_temp_data(actual_temps_dict, final_low_rcp_list,
final_median_rcp_list, final_high_rcp_list)
draw_table(actual_temps_dict, final_low_rcp_list, final_median_rcp_list,
final_high_rcp_list,
low_rcp_percentage_difference, median_rcp_percentage_difference,
high_rcp_percentage_difference)
temperatures = [actual_temps_dict[year], predicted_temps_dict[year]['RCP 2.6'],
predicted_temps_dict[year]['RCP 4.5'], predicted_temps_dict[year]['RCP 8.5']]
CITY_TEMPS[city] = temperatures
if __name__ == '__main__':
year = input('Write the year for the map to display data from '
'(in range of 2003-2019 inclusive)')
if not 2003 <= int(year) <= 2019:
year = input('Try again. Write the number between 2003 and 2019 inclusive')
city_name = input(
'Type the name of the city you want to display its stats on graph'
'(TORONTO, QUEBEC, HALIFAX, WINNIPEG)')
if city_name.lower() not in ('toronto', 'halifax', 'quebec', 'winnipeg'):
city_name = input(
'Try again. Type Toronto or Quebec or Halifax or Winnipeg')
rcp_type = input(
'Write an RCP value for the map to display on the "predicted" side.'
'(write RCP 2.6 or RCP 4.5 or RCP 8.5)')
if rcp_type not in ('RCP 2.6', 'RCP 4.5', 'RCP 8.5'):
        rcp_type = input('Try again. Write RCP 2.6 or RCP 4.5 or RCP 8.5')
while True:
for city in CITY_SET:
run(city, int(year), city_name)
draw_map(rcp_type)
year = input('Write the year for the map to display data from '
'(in range of 2003-2019 inclusive). '
'Type 2 wrong answers to exit')
if not 2003 <= int(year) <= 2019:
year = input('Try again. Write the number between 2003 and 2019 inclusive. '
'Type a wrong answer to exit')
if not 2003 <= int(year) <= 2019:
break
city_name = input(
'Type the name of the city you want to display its stats on graph'
'(TORONTO, QUEBEC, HALIFAX, WINNIPEG) Type 2 wrong answers to exit.')
if city_name.lower() not in ('toronto', 'halifax', 'quebec', 'winnipeg'):
city_name = input(
'Try again. Type Toronto or Quebec or Halifax or Winnipeg. '
'Type a wrong answer to exit.')
if city_name.lower() not in ('toronto', 'halifax', 'quebec', 'winnipeg'):
break
rcp_type = input(
'Write an RCP value for the map to display on the "predicted" side.'
'(write RCP 2.6 or RCP 4.5 or RCP 8.5) Type 2 wrong answers to exit')
if rcp_type not in ('RCP 2.6', 'RCP 4.5', 'RCP 8.5'):
            rcp_type = input('Try again. Write RCP 2.6 or RCP 4.5 or RCP 8.5. '
'Type a wrong answer to exit.')
if rcp_type not in ('RCP 2.6', 'RCP 4.5', 'RCP 8.5'):
break
| true
| true
|
f705805a5a9535dbea10b25388e800f47b46988d
| 1,937
|
py
|
Python
|
pyscf/nao/tddft_iter_x_zip.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 3
|
2021-02-28T00:52:53.000Z
|
2021-03-01T06:23:33.000Z
|
pyscf/nao/tddft_iter_x_zip.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 36
|
2018-08-22T19:44:03.000Z
|
2020-05-09T10:02:36.000Z
|
pyscf/nao/tddft_iter_x_zip.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 4
|
2018-02-14T16:28:28.000Z
|
2019-08-12T16:40:30.000Z
|
from __future__ import print_function, division
from numpy import array, argmax
from pyscf.nao import tddft_iter
class tddft_iter_x_zip(tddft_iter):
""" Iterative TDDFT with a high-energy part of the KS eigenvectors compressed """
def __init__(self, **kw):
from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations
tddft_iter.__init__(self, **kw)
self.x_zip = kw['x_zip'] if 'x_zip' in kw else False
self.x_zip_eps = kw['x_zip_eps'] if 'x_zip_eps' in kw else 0.05
self.x_zip_emax = kw['x_zip_emax'] if 'x_zip_emax' in kw else 0.25
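    # x_zip_eps and x_zip_emax tune the eigenvector compression: a merging tolerance and an
    # energy cutoff passed straight to m_x_zip.x_zip (the defaults are presumably in Hartree).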
if self.x_zip: # redefine the eigenvectors
sm2e,sma2x = self.build_x_zip()
if self.verbosity>0:
print(__name__, 'self.mo_energy.shape =', self.mo_energy.shape)
print(__name__, 'sm2e.shape =', sm2e.shape)
self.ksn2e = array([sm2e])
ksn2fd = fermi_dirac_occupations(self.telec, self.ksn2e, self.fermi_energy)
for s,n2fd in enumerate(ksn2fd[0]):
if not all(n2fd>self.nfermi_tol): continue
        print(self.telec, s, self.nfermi_tol, n2fd)
raise RuntimeError(__name__, 'telec is too high?')
self.ksn2f = (3-self.nspin)*ksn2fd
self.nfermi = array([argmax(ksn2fd[0,s,:]<self.nfermi_tol) for s in range(self.nspin)], dtype=int)
self.vstart = array([argmax(1.0-ksn2fd[0,s,:]>=self.nfermi_tol) for s in range(self.nspin)], dtype=int)
self.xocc = [ma2x[:nfermi,:] for ma2x,nfermi in zip(sma2x,self.nfermi)]
self.xvrt = [ma2x[vstart:,:] for ma2x,vstart in zip(sma2x,self.vstart)]
def build_x_zip(self):
""" define compressed eigenvectors """
from pyscf.nao.m_x_zip import x_zip
sm2e = []
sma2x = []
for n2e,na2x in zip(self.mo_energy[0], self.mo_coeff[0,:,:,:,0]):
vst, i2w,i2dos, m2e, ma2x = x_zip(n2e, na2x, eps=self.x_zip_eps, emax=self.x_zip_emax)
sm2e.append(m2e)
sma2x.append(ma2x)
sm2e = array(sm2e)
return sm2e, sma2x
| 42.108696
| 109
| 0.669592
|
from __future__ import print_function, division
from numpy import array, argmax
from pyscf.nao import tddft_iter
class tddft_iter_x_zip(tddft_iter):
def __init__(self, **kw):
from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations
tddft_iter.__init__(self, **kw)
self.x_zip = kw['x_zip'] if 'x_zip' in kw else False
self.x_zip_eps = kw['x_zip_eps'] if 'x_zip_eps' in kw else 0.05
self.x_zip_emax = kw['x_zip_emax'] if 'x_zip_emax' in kw else 0.25
if self.x_zip:
sm2e,sma2x = self.build_x_zip()
if self.verbosity>0:
print(__name__, 'self.mo_energy.shape =', self.mo_energy.shape)
print(__name__, 'sm2e.shape =', sm2e.shape)
self.ksn2e = array([sm2e])
ksn2fd = fermi_dirac_occupations(self.telec, self.ksn2e, self.fermi_energy)
for s,n2fd in enumerate(ksn2fd[0]):
if not all(n2fd>self.nfermi_tol): continue
        print(self.telec, s, self.nfermi_tol, n2fd)
raise RuntimeError(__name__, 'telec is too high?')
self.ksn2f = (3-self.nspin)*ksn2fd
self.nfermi = array([argmax(ksn2fd[0,s,:]<self.nfermi_tol) for s in range(self.nspin)], dtype=int)
self.vstart = array([argmax(1.0-ksn2fd[0,s,:]>=self.nfermi_tol) for s in range(self.nspin)], dtype=int)
self.xocc = [ma2x[:nfermi,:] for ma2x,nfermi in zip(sma2x,self.nfermi)]
self.xvrt = [ma2x[vstart:,:] for ma2x,vstart in zip(sma2x,self.vstart)]
def build_x_zip(self):
from pyscf.nao.m_x_zip import x_zip
sm2e = []
sma2x = []
for n2e,na2x in zip(self.mo_energy[0], self.mo_coeff[0,:,:,:,0]):
vst, i2w,i2dos, m2e, ma2x = x_zip(n2e, na2x, eps=self.x_zip_eps, emax=self.x_zip_emax)
sm2e.append(m2e)
sma2x.append(ma2x)
sm2e = array(sm2e)
return sm2e, sma2x
| true
| true
|
f705809ff2c93a8fa8ce2eda3a50fb2aa0ec5726
| 200
|
py
|
Python
|
http_request_randomizer/requests/errors/ProxyListException.py
|
nderkach/HTTP_Request_Randomizer
|
48f445ff2315c27e096a5ee3165329b637095e83
|
[
"MIT"
] | 146
|
2016-01-20T22:36:25.000Z
|
2022-03-25T12:55:33.000Z
|
http_request_randomizer/requests/errors/ProxyListException.py
|
nderkach/HTTP_Request_Randomizer
|
48f445ff2315c27e096a5ee3165329b637095e83
|
[
"MIT"
] | 70
|
2016-07-11T18:14:08.000Z
|
2022-02-03T05:12:37.000Z
|
http_request_randomizer/requests/errors/ProxyListException.py
|
nderkach/HTTP_Request_Randomizer
|
48f445ff2315c27e096a5ee3165329b637095e83
|
[
"MIT"
] | 61
|
2016-06-07T01:16:21.000Z
|
2022-02-21T19:13:22.000Z
|
class ProxyListException(Exception):
    def __init__(self, extraArguments):
Exception.__init__(self, " was raised - {0}".format(extraArguments))
self.dErrorArguments = extraArguments
| 50
| 76
| 0.73
|
class ProxyListException(Exception):
    def __init__(self, extraArguments):
Exception.__init__(self, " was raised - {0}".format(extraArguments))
self.dErrorArguments = extraArguments
| true
| true
|
f70580d96b98a06da9dda9cd1e9dc054bb30b99f
| 22,694
|
py
|
Python
|
arcade/tilemap.py
|
Mr-Coxall/arcade
|
7767e9c7d7395c0dd35479744052f18ac8c86679
|
[
"MIT"
] | null | null | null |
arcade/tilemap.py
|
Mr-Coxall/arcade
|
7767e9c7d7395c0dd35479744052f18ac8c86679
|
[
"MIT"
] | null | null | null |
arcade/tilemap.py
|
Mr-Coxall/arcade
|
7767e9c7d7395c0dd35479744052f18ac8c86679
|
[
"MIT"
] | null | null | null |
"""
Functions and classes for managing a map saved in the .tmx format.
Typically these .tmx maps are created using the `Tiled Map Editor`_.
For more information, see the `Platformer Tutorial`_.
.. _Tiled Map Editor: https://www.mapeditor.org/
.. _Platformer Tutorial: http://arcade.academy/examples/platform_tutorial/index.html
"""
import copy
import math
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union, cast
import pytiled_parser
from arcade import (
AnimatedTimeBasedSprite,
AnimationKeyframe,
Sprite,
SpriteList,
load_texture,
)
from arcade.arcade_types import Point
from arcade.resources import resolve_resource_path
_FLIPPED_HORIZONTALLY_FLAG = 0x80000000
_FLIPPED_VERTICALLY_FLAG = 0x40000000
_FLIPPED_DIAGONALLY_FLAG = 0x20000000
def read_tmx(map_file: Union[str, Path]) -> pytiled_parser.TiledMap:
raise DeprecationWarning("The read_tmx function has been replaced with read_map. Use this function and convert your .tmx files to .json using the Tiled editor.")
def read_map(map_file: Union[str, Path]) -> pytiled_parser.TiledMap:
"""
Given a .json file, this will read in a tiled map, and return
a TiledMap object.
Important: Tiles must be a "collection" of images.
Hitboxes can be drawn around tiles in the tileset editor,
but only polygons are supported.
(This is a great area for PR's to improve things.)
    :param str map_file: String with name of our JSON Tiled map file
:returns: Map
:rtype: TiledMap
"""
# If we should pull from local resources, replace with proper path
map_file = resolve_resource_path(map_file)
tile_map = pytiled_parser.parse_map(map_file)
return tile_map
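# A minimal usage sketch (the file name here is illustrative, not a bundled resource):
#   my_map = read_map("my_level.json")
#   platforms = process_layer(my_map, "Platforms", scaling=0.5)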
def get_cartesian(
map_object: pytiled_parser.TiledMap, coordinates: pytiled_parser.OrderedPair
) -> pytiled_parser.OrderedPair:
"""
Given a TiledMap and a set of coordinates, this returns the cartesian coordinates
This assumed the supplied coordinates are pixel coordinates, and bases the cartesian
grid off of the Map's tile size.
So if you have a map with 128x128 pixel Tiles, and you supply coordinates 500, 250 to
    this function you'll receive back 3, 1.
This works by taking the floor of the quotient of the pixel coordinate divided by the
tile size.
:param pytiled_parser.TiledMap map_object: The map to pull tile size from
:param pytiled_parser.OrderedPair coordinates: The pixel coordinates to convert
"""
x = math.floor(coordinates.x / map_object.tile_size.width)
y = math.floor(coordinates.y / map_object.tile_size.height)
return pytiled_parser.OrderedPair(x, y)
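# Worked example: with 128x128 pixel tiles, pixel coordinates (500, 250) map to
# (floor(500 / 128), floor(250 / 128)) == (3, 1).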
def get_tilemap_layer(
map_object: pytiled_parser.TiledMap, layer_path: str
) -> Optional[pytiled_parser.Layer]:
"""
Given a TiledMap and a layer path, this returns the TileLayer.
:param pytiled_parser.objects.TileMap map_object: The map read in by the read_tmx function.
:param str layer_path: A string to match the layer name. Case sensitive.
:returns: A TileLayer, or None if no layer was found.
"""
assert isinstance(map_object, pytiled_parser.TiledMap)
assert isinstance(layer_path, str)
def _get_tilemap_layer(path, layers):
layer_name = path.pop(0)
for layer in layers:
if layer.name == layer_name:
if isinstance(layer, pytiled_parser.LayerGroup):
if len(path) != 0:
return _get_tilemap_layer(path, layer.layers)
else:
return layer
return None
path = layer_path.strip("/").split("/")
layer = _get_tilemap_layer(path, map_object.layers)
return layer
def _get_tile_by_gid(
map_object: pytiled_parser.TiledMap, tile_gid: int
) -> Optional[pytiled_parser.Tile]:
flipped_diagonally = False
flipped_horizontally = False
flipped_vertically = False
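    # Tiled stores flip information in the top three bits of each GID
    # (horizontal, vertical and diagonal); record each flag and clear it so the
    # remaining bits can be matched against the tileset's first-GID keys below.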
if tile_gid & _FLIPPED_HORIZONTALLY_FLAG:
flipped_horizontally = True
tile_gid -= _FLIPPED_HORIZONTALLY_FLAG
if tile_gid & _FLIPPED_DIAGONALLY_FLAG:
flipped_diagonally = True
tile_gid -= _FLIPPED_DIAGONALLY_FLAG
if tile_gid & _FLIPPED_VERTICALLY_FLAG:
flipped_vertically = True
tile_gid -= _FLIPPED_VERTICALLY_FLAG
for tileset_key, tileset in map_object.tilesets.items():
if tile_gid < tileset_key:
continue
# No specific tile info, but there is a tile sheet
if (
tileset.tiles is None
and tileset.image is not None
and tileset_key <= tile_gid < tileset_key + tileset.tile_count
):
tile_ref = pytiled_parser.Tile(
id=(tile_gid - tileset_key), image=tileset.image
)
else:
tile_ref = tileset.tiles.get(tile_gid - tileset_key)
if tile_ref:
my_tile = copy.copy(tile_ref)
my_tile.tileset = tileset
my_tile.flipped_vertically = flipped_vertically
my_tile.flipped_diagonally = flipped_diagonally
my_tile.flipped_horizontally = flipped_horizontally
return my_tile
return None
def _get_tile_by_id(
map_object: pytiled_parser.TiledMap, tileset: pytiled_parser.Tileset, tile_id: int
) -> Optional[pytiled_parser.Tile]:
for tileset_key, cur_tileset in map_object.tilesets.items():
if cur_tileset is tileset:
for tile_key, tile in cur_tileset.tiles.items():
if tile_id == tile.id:
return tile
return None
def _get_image_info_from_tileset(tile):
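    # Locate the tile inside its tileset sheet: convert the tile id into a
    # row/column position and apply the tileset's margin and spacing to get the
    # pixel offset, falling back to the individual image size when there is no sheet.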
image_x = 0
image_y = 0
if tile.tileset.image is not None:
margin = tile.tileset.margin or 0
spacing = tile.tileset.spacing or 0
row = tile.id // tile.tileset.columns
image_y = margin + row * (tile.tileset.tile_height + spacing)
col = tile.id % tile.tileset.columns
image_x = margin + col * (tile.tileset.tile_width + spacing)
if tile.tileset.image:
# Sprite sheet, use max width/height from sheet
width = tile.tileset.tile_width
height = tile.tileset.tile_height
else:
# Individual image, use image width and height
width = tile.image_width
height = tile.image_height
return image_x, image_y, width, height
def _get_image_source(
tile: pytiled_parser.Tile,
base_directory: Optional[str],
map_directory: Optional[str],
):
image_file = None
if tile.image:
image_file = tile.image
elif tile.tileset.image:
image_file = tile.tileset.image
if not image_file:
print(
f"Warning for tile {tile.id_}, no image source listed either for individual tile, or as a tileset."
)
return None
if os.path.exists(image_file):
return image_file
if base_directory:
try2 = Path(base_directory, image_file)
if os.path.exists(try2):
return try2
if map_directory:
try3 = Path(map_directory, image_file)
if os.path.exists(try3):
return try3
print(
f"Warning, can't find image {image_file} for tile {tile.id} - {base_directory}"
)
return None
def _create_sprite_from_tile(
map_object: pytiled_parser.TiledMap,
tile: pytiled_parser.Tile,
scaling: float = 1.0,
base_directory: str = None,
hit_box_algorithm="Simple",
hit_box_detail: float = 4.5,
):
"""
Given a tile from the parser, see if we can create a sprite from it
"""
# --- Step 1, find a reference to an image this is going to be based off of
map_source = map_object.map_file
map_directory = os.path.dirname(map_source)
image_file = _get_image_source(tile, base_directory, map_directory)
# print(f"Creating tile: {tmx_file}")
if tile.animation:
# my_sprite = AnimatedTimeSprite(tmx_file, scaling)
my_sprite: Sprite = AnimatedTimeBasedSprite(image_file, scaling)
else:
image_x, image_y, width, height = _get_image_info_from_tileset(tile)
my_sprite = Sprite(
image_file,
scaling,
image_x,
image_y,
width,
height,
flipped_horizontally=tile.flipped_horizontally,
flipped_vertically=tile.flipped_vertically,
flipped_diagonally=tile.flipped_diagonally,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if tile.properties is not None and len(tile.properties) > 0:
for key, value in tile.properties.items():
my_sprite.properties[key] = value
if tile.type:
my_sprite.properties["type"] = tile.type
# print(tile.image.source, my_sprite.center_x, my_sprite.center_y)
if tile.objects is not None:
if len(tile.objects.tiled_objects) > 1:
print(
f"Warning, only one hit box supported for tile with image {tile.image.source}."
)
for hitbox in tile.objects.tiled_objects:
points: List[Point] = []
if isinstance(hitbox, pytiled_parser.tiled_object.Rectangle):
if hitbox.size is None:
print(
f"Warning: Rectangle hitbox created for without a "
f"height or width for {tile.image.source}. Ignoring."
)
continue
# print(my_sprite.width, my_sprite.height)
sx = hitbox.coordinates.x - (my_sprite.width / (scaling * 2))
sy = -(hitbox.coordinates.y - (my_sprite.height / (scaling * 2)))
ex = (hitbox.coordinates.x + hitbox.size.width) - (
my_sprite.width / (scaling * 2)
)
ey = -(
(hitbox.coordinates.y + hitbox.size.height)
- (my_sprite.height / (scaling * 2))
)
# print(f"Size: {hitbox.size} Location: {hitbox.location}")
p1 = [sx, sy]
p2 = [ex, sy]
p3 = [ex, ey]
p4 = [sx, ey]
# print(f"w:{my_sprite.width:.1f}, h:{my_sprite.height:.1f}", end=", ")
points = [p1, p2, p3, p4]
# for point in points:
# print(f"({point[0]:.1f}, {point[1]:.1f}) ")
# print()
elif isinstance(hitbox, pytiled_parser.tiled_object.Polygon) or isinstance(
hitbox, pytiled_parser.tiled_object.Polyline
):
for point in hitbox.points:
adj_x = (
point.x + hitbox.coordinates.x - my_sprite.width / (scaling * 2)
)
adj_y = -(
point.y
+ hitbox.coordinates.y
- my_sprite.height / (scaling * 2)
)
adj_point = [adj_x, adj_y]
points.append(adj_point)
# If we have a polyline, and it is closed, we need to
# remove the duplicate end-point
if points[0][0] == points[-1][0] and points[0][1] == points[-1][1]:
points.pop()
elif isinstance(hitbox, pytiled_parser.tiled_object.Ellipse):
if hitbox.size is None:
print(
f"Warning: Ellipse hitbox created for without a height "
f"or width for {tile.image.source}. Ignoring."
)
continue
# print(f"Size: {hitbox.size} Location: {hitbox.location}")
hw = hitbox.size.width / 2
hh = hitbox.size.height / 2
cx = hitbox.coordinates.x + hw
cy = hitbox.coordinates.y + hh
acx = cx - (my_sprite.width / (scaling * 2))
acy = cy - (my_sprite.height / (scaling * 2))
# print(f"acx: {acx} acy: {acy} cx: {cx} cy: {cy} hh: {hh} hw: {hw}")
total_steps = 8
angles = [
step / total_steps * 2 * math.pi for step in range(total_steps)
]
for angle in angles:
x = hw * math.cos(angle) + acx
y = -(hh * math.sin(angle) + acy)
point = [x, y]
points.append(point)
# for point in points:
# print(f"({point[0]:.1f}, {point[1]:.1f}) ")
# print()
else:
print(f"Warning: Hitbox type {type(hitbox)} not supported.")
my_sprite.set_hit_box(points)
if tile.animation is not None:
# Animated image
key_frame_list = []
# Loop through each frame
for frame in tile.animation:
# Get the tile for the frame
frame_tile = _get_tile_by_id(map_object, tile.tileset, frame.tile_id)
if frame_tile:
image_file = _get_image_source(
frame_tile, base_directory, map_directory
)
# Does the tile have an image?
if frame_tile.image:
# Yes, use it
texture = load_texture(image_file)
else:
# No image for tile? Pull from tilesheet
image_x, image_y, width, height = _get_image_info_from_tileset(
frame_tile
)
texture = load_texture(image_file, image_x, image_y, width, height)
key_frame = AnimationKeyframe(frame.tile_id, frame.duration, texture)
key_frame_list.append(key_frame)
# If this is the first texture in the animation, go ahead and
# set it as the current texture.
if len(key_frame_list) == 1:
my_sprite.texture = key_frame.texture
# print(f"Add tile {frame.tile_id} for keyframe. Source: {frame_tile.image.source}")
cast(AnimatedTimeBasedSprite, my_sprite).frames = key_frame_list
return my_sprite
def _process_object_layer(
map_object: pytiled_parser.TiledMap,
layer: pytiled_parser.ObjectLayer,
scaling: float = 1,
base_directory: str = "",
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm="Simple",
hit_box_detail=4.5,
) -> SpriteList:
sprite_list: SpriteList = SpriteList(use_spatial_hash=use_spatial_hash)
for cur_object in layer.tiled_objects:
if not hasattr(cur_object, "gid"):
print(
"Warning: Currently only tiles (not objects) are supported in object layers."
)
continue
tile = _get_tile_by_gid(map_object, cur_object.gid)
my_sprite = _create_sprite_from_tile(
map_object,
tile,
scaling=scaling,
base_directory=base_directory,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
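        # Tiled uses a top-left origin with y growing downwards, so flip the y
        # coordinate into arcade's bottom-up space; the offset to the sprite's
        # center is then rotated around the object's anchor point so rotated
        # objects keep the position shown in the editor.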
x = cur_object.coordinates.x * scaling
y = (
map_object.map_size.height * map_object.tile_size[1]
- cur_object.coordinates.y
) * scaling
my_sprite.width = width = cur_object.size[0] * scaling
my_sprite.height = height = cur_object.size[1] * scaling
center_x = width / 2
center_y = height / 2
if cur_object.rotation is not None:
rotation = -math.radians(cur_object.rotation)
else:
rotation = 0
cos_rotation = math.cos(rotation)
sin_rotation = math.sin(rotation)
rotated_center_x = center_x * cos_rotation - center_y * sin_rotation
rotated_center_y = center_x * sin_rotation + center_y * cos_rotation
my_sprite.position = (x + rotated_center_x, y + rotated_center_y)
my_sprite.angle = math.degrees(rotation)
# Opacity
opacity = layer.opacity
if opacity:
my_sprite.alpha = int(opacity * 255)
# Properties
if cur_object.properties is not None and "change_x" in cur_object.properties:
my_sprite.change_x = float(cur_object.properties["change_x"])
if cur_object.properties is not None and "change_y" in cur_object.properties:
my_sprite.change_y = float(cur_object.properties["change_y"])
if (
cur_object.properties is not None
and "boundary_bottom" in cur_object.properties
):
my_sprite.boundary_bottom = float(cur_object.properties["boundary_bottom"])
if (
cur_object.properties is not None
and "boundary_top" in cur_object.properties
):
my_sprite.boundary_top = float(cur_object.properties["boundary_top"])
if (
cur_object.properties is not None
and "boundary_left" in cur_object.properties
):
my_sprite.boundary_left = float(cur_object.properties["boundary_left"])
if (
cur_object.properties is not None
and "boundary_right" in cur_object.properties
):
my_sprite.boundary_right = float(cur_object.properties["boundary_right"])
if cur_object.properties is not None:
my_sprite.properties.update(cur_object.properties)
if cur_object.type:
my_sprite.properties["type"] = cur_object.type
if cur_object.name:
my_sprite.properties["name"] = cur_object.name
sprite_list.append(my_sprite)
return sprite_list
def _process_tile_layer(
map_object: pytiled_parser.TiledMap,
layer: pytiled_parser.TileLayer,
scaling: float = 1,
base_directory: str = "",
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm="Simple",
hit_box_detail: float = 4.5,
) -> SpriteList:
sprite_list: SpriteList = SpriteList(use_spatial_hash=use_spatial_hash)
map_array = layer.data
# Loop through the layer and add in the wall list
for row_index, row in enumerate(map_array):
for column_index, item in enumerate(row):
# Check for empty square
if item == 0:
continue
tile = _get_tile_by_gid(map_object, item)
if tile is None:
error_msg = (
f"Warning, couldn't find tile for item {item} in layer "
f"'{layer.name}' in file '{map_object.map_file}'."
)
raise ValueError(error_msg)
my_sprite = _create_sprite_from_tile(
map_object,
tile,
scaling=scaling,
base_directory=base_directory,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if my_sprite is None:
print(
f"Warning: Could not create sprite number {item} in layer '{layer.name}' {tile.image.source}"
)
else:
my_sprite.center_x = (
column_index * (map_object.tile_size[0] * scaling)
+ my_sprite.width / 2
)
my_sprite.center_y = (map_object.map_size.height - row_index - 1) * (
map_object.tile_size[1] * scaling
) + my_sprite.height / 2
# Opacity
opacity = layer.opacity
if opacity:
my_sprite.alpha = int(opacity * 255)
sprite_list.append(my_sprite)
return sprite_list
def process_layer(
map_object: pytiled_parser.TiledMap,
layer_name: str,
scaling: float = 1,
base_directory: str = "",
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm="Simple",
hit_box_detail: float = 4.5,
) -> SpriteList:
"""
This takes a map layer returned by the read_tmx function, and creates Sprites for it.
:param map_object: The TileMap read in by read_tmx.
:param layer_name: The name of the layer that we are creating sprites for.
:param scaling: Scaling the layer up or down.
(Note, any number besides 1 can create a tearing effect,
if numbers don't evenly divide.)
:param base_directory: Base directory of the file, that we start from to
load images.
:param use_spatial_hash: If all, or at least 75%, of the loaded tiles will not
move between frames and you are using either the
simple physics engine or platformer physics engine,
set this to True to speed collision calculation.
Leave False if using PyMunk, if all sprites are moving,
or if no collision will be checked.
:param str hit_box_algorithm: One of 'None', 'Simple' or 'Detailed'. \
Defaults to 'Simple'. Use 'Simple' for the :data:`PhysicsEngineSimple`, \
:data:`PhysicsEnginePlatformer` \
and 'Detailed' for the :data:`PymunkPhysicsEngine`.
.. figure:: images/hit_box_algorithm_none.png
:width: 40%
hit_box_algorithm = "None"
.. figure:: images/hit_box_algorithm_simple.png
:width: 55%
hit_box_algorithm = "Simple"
.. figure:: images/hit_box_algorithm_detailed.png
:width: 75%
hit_box_algorithm = "Detailed"
:param float hit_box_detail: Float, defaults to 4.5. Used with 'Detailed' to hit box
:returns: A SpriteList.
"""
if len(base_directory) > 0 and not base_directory.endswith("/"):
base_directory += "/"
layer = get_tilemap_layer(map_object, layer_name)
if layer is None:
print(f"Warning, no layer named '{layer_name}'.")
return SpriteList()
if isinstance(layer, pytiled_parser.TileLayer):
return _process_tile_layer(
map_object,
layer,
scaling,
base_directory,
use_spatial_hash,
hit_box_algorithm,
hit_box_detail,
)
elif isinstance(layer, pytiled_parser.ObjectLayer):
return _process_object_layer(
map_object,
layer,
scaling,
base_directory,
use_spatial_hash,
hit_box_algorithm,
hit_box_detail,
)
print(f"Warning, layer '{layer_name}' has unexpected type. '{type(layer)}'")
return SpriteList()
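# --- Usage sketch (added for illustration; not part of the original module) ---
# The map path "maps/level_1.json" and the layer name "Platforms" below are
# hypothetical placeholders -- any Tiled map exported to JSON with a matching
# tile layer would work the same way with read_map() and process_layer() above.
def _example_load_platforms() -> SpriteList:
    tile_map = read_map("maps/level_1.json")
    return process_layer(
        tile_map,
        "Platforms",
        scaling=0.5,
        use_spatial_hash=True,  # static platforms: spatial hashing speeds up collision checks
    )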
| 34.384848
| 165
| 0.59822
|
import copy
import math
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union, cast
import pytiled_parser
from arcade import (
AnimatedTimeBasedSprite,
AnimationKeyframe,
Sprite,
SpriteList,
load_texture,
)
from arcade.arcade_types import Point
from arcade.resources import resolve_resource_path
_FLIPPED_HORIZONTALLY_FLAG = 0x80000000
_FLIPPED_VERTICALLY_FLAG = 0x40000000
_FLIPPED_DIAGONALLY_FLAG = 0x20000000
def read_tmx(map_file: Union[str, Path]) -> pytiled_parser.TiledMap:
raise DeprecationWarning("The read_tmx function has been replaced with read_map. Use this function and convert your .tmx files to .json using the Tiled editor.")
def read_map(map_file: Union[str, Path]) -> pytiled_parser.TiledMap:
map_file = resolve_resource_path(map_file)
tile_map = pytiled_parser.parse_map(map_file)
return tile_map
def get_cartesian(
map_object: pytiled_parser.TiledMap, coordinates: pytiled_parser.OrderedPair
) -> pytiled_parser.OrderedPair:
x = math.floor(coordinates.x / map_object.tile_size.width)
y = math.floor(coordinates.y / map_object.tile_size.height)
return pytiled_parser.OrderedPair(x, y)
def get_tilemap_layer(
map_object: pytiled_parser.TiledMap, layer_path: str
) -> Optional[pytiled_parser.Layer]:
assert isinstance(map_object, pytiled_parser.TiledMap)
assert isinstance(layer_path, str)
def _get_tilemap_layer(path, layers):
layer_name = path.pop(0)
for layer in layers:
if layer.name == layer_name:
if isinstance(layer, pytiled_parser.LayerGroup):
if len(path) != 0:
return _get_tilemap_layer(path, layer.layers)
else:
return layer
return None
path = layer_path.strip("/").split("/")
layer = _get_tilemap_layer(path, map_object.layers)
return layer
def _get_tile_by_gid(
map_object: pytiled_parser.TiledMap, tile_gid: int
) -> Optional[pytiled_parser.Tile]:
flipped_diagonally = False
flipped_horizontally = False
flipped_vertically = False
if tile_gid & _FLIPPED_HORIZONTALLY_FLAG:
flipped_horizontally = True
tile_gid -= _FLIPPED_HORIZONTALLY_FLAG
if tile_gid & _FLIPPED_DIAGONALLY_FLAG:
flipped_diagonally = True
tile_gid -= _FLIPPED_DIAGONALLY_FLAG
if tile_gid & _FLIPPED_VERTICALLY_FLAG:
flipped_vertically = True
tile_gid -= _FLIPPED_VERTICALLY_FLAG
for tileset_key, tileset in map_object.tilesets.items():
if tile_gid < tileset_key:
continue
if (
tileset.tiles is None
and tileset.image is not None
and tileset_key <= tile_gid < tileset_key + tileset.tile_count
):
tile_ref = pytiled_parser.Tile(
id=(tile_gid - tileset_key), image=tileset.image
)
else:
tile_ref = tileset.tiles.get(tile_gid - tileset_key)
if tile_ref:
my_tile = copy.copy(tile_ref)
my_tile.tileset = tileset
my_tile.flipped_vertically = flipped_vertically
my_tile.flipped_diagonally = flipped_diagonally
my_tile.flipped_horizontally = flipped_horizontally
return my_tile
return None
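# Added note: the three high bits of a raw Tiled gid encode flipping, so a raw
# gid of 0x80000005 has the horizontal-flip bit set.  _get_tile_by_gid() records
# the flag and strips it before the tileset lookup:
#     0x80000005 & _FLIPPED_HORIZONTALLY_FLAG  -> nonzero, flag recorded
#     0x80000005 - _FLIPPED_HORIZONTALLY_FLAG  -> 5 (plain gid used for the tileset lookup)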
def _get_tile_by_id(
map_object: pytiled_parser.TiledMap, tileset: pytiled_parser.Tileset, tile_id: int
) -> Optional[pytiled_parser.Tile]:
for tileset_key, cur_tileset in map_object.tilesets.items():
if cur_tileset is tileset:
for tile_key, tile in cur_tileset.tiles.items():
if tile_id == tile.id:
return tile
return None
def _get_image_info_from_tileset(tile):
image_x = 0
image_y = 0
if tile.tileset.image is not None:
margin = tile.tileset.margin or 0
spacing = tile.tileset.spacing or 0
row = tile.id // tile.tileset.columns
image_y = margin + row * (tile.tileset.tile_height + spacing)
col = tile.id % tile.tileset.columns
image_x = margin + col * (tile.tileset.tile_width + spacing)
if tile.tileset.image:
width = tile.tileset.tile_width
height = tile.tileset.tile_height
else:
width = tile.image_width
height = tile.image_height
return image_x, image_y, width, height
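# Worked example (added): for a tilesheet with 4 columns, 32x32 tiles, a margin
# of 2 and a spacing of 1, tile id 5 falls on row 5 // 4 = 1, column 5 % 4 = 1,
# giving image_x = 2 + 1 * (32 + 1) = 35 and image_y = 2 + 1 * (32 + 1) = 35.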
def _get_image_source(
tile: pytiled_parser.Tile,
base_directory: Optional[str],
map_directory: Optional[str],
):
image_file = None
if tile.image:
image_file = tile.image
elif tile.tileset.image:
image_file = tile.tileset.image
if not image_file:
print(
f"Warning for tile {tile.id_}, no image source listed either for individual tile, or as a tileset."
)
return None
if os.path.exists(image_file):
return image_file
if base_directory:
try2 = Path(base_directory, image_file)
if os.path.exists(try2):
return try2
if map_directory:
try3 = Path(map_directory, image_file)
if os.path.exists(try3):
return try3
print(
f"Warning, can't find image {image_file} for tile {tile.id} - {base_directory}"
)
return None
def _create_sprite_from_tile(
map_object: pytiled_parser.TiledMap,
tile: pytiled_parser.Tile,
scaling: float = 1.0,
base_directory: str = None,
hit_box_algorithm="Simple",
hit_box_detail: float = 4.5,
):
# --- Step 1, find a reference to an image this is going to be based off of
map_source = map_object.map_file
map_directory = os.path.dirname(map_source)
image_file = _get_image_source(tile, base_directory, map_directory)
# print(f"Creating tile: {tmx_file}")
if tile.animation:
# my_sprite = AnimatedTimeSprite(tmx_file, scaling)
my_sprite: Sprite = AnimatedTimeBasedSprite(image_file, scaling)
else:
image_x, image_y, width, height = _get_image_info_from_tileset(tile)
my_sprite = Sprite(
image_file,
scaling,
image_x,
image_y,
width,
height,
flipped_horizontally=tile.flipped_horizontally,
flipped_vertically=tile.flipped_vertically,
flipped_diagonally=tile.flipped_diagonally,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if tile.properties is not None and len(tile.properties) > 0:
for key, value in tile.properties.items():
my_sprite.properties[key] = value
if tile.type:
my_sprite.properties["type"] = tile.type
# print(tile.image.source, my_sprite.center_x, my_sprite.center_y)
if tile.objects is not None:
if len(tile.objects.tiled_objects) > 1:
print(
f"Warning, only one hit box supported for tile with image {tile.image.source}."
)
for hitbox in tile.objects.tiled_objects:
points: List[Point] = []
if isinstance(hitbox, pytiled_parser.tiled_object.Rectangle):
if hitbox.size is None:
print(
f"Warning: Rectangle hitbox created for without a "
f"height or width for {tile.image.source}. Ignoring."
)
continue
# print(my_sprite.width, my_sprite.height)
sx = hitbox.coordinates.x - (my_sprite.width / (scaling * 2))
sy = -(hitbox.coordinates.y - (my_sprite.height / (scaling * 2)))
ex = (hitbox.coordinates.x + hitbox.size.width) - (
my_sprite.width / (scaling * 2)
)
ey = -(
(hitbox.coordinates.y + hitbox.size.height)
- (my_sprite.height / (scaling * 2))
)
# print(f"Size: {hitbox.size} Location: {hitbox.location}")
p1 = [sx, sy]
p2 = [ex, sy]
p3 = [ex, ey]
p4 = [sx, ey]
# print(f"w:{my_sprite.width:.1f}, h:{my_sprite.height:.1f}", end=", ")
points = [p1, p2, p3, p4]
# for point in points:
# print(f"({point[0]:.1f}, {point[1]:.1f}) ")
# print()
elif isinstance(hitbox, pytiled_parser.tiled_object.Polygon) or isinstance(
hitbox, pytiled_parser.tiled_object.Polyline
):
for point in hitbox.points:
adj_x = (
point.x + hitbox.coordinates.x - my_sprite.width / (scaling * 2)
)
adj_y = -(
point.y
+ hitbox.coordinates.y
- my_sprite.height / (scaling * 2)
)
adj_point = [adj_x, adj_y]
points.append(adj_point)
# If we have a polyline, and it is closed, we need to
# remove the duplicate end-point
if points[0][0] == points[-1][0] and points[0][1] == points[-1][1]:
points.pop()
elif isinstance(hitbox, pytiled_parser.tiled_object.Ellipse):
if hitbox.size is None:
print(
f"Warning: Ellipse hitbox created for without a height "
f"or width for {tile.image.source}. Ignoring."
)
continue
# print(f"Size: {hitbox.size} Location: {hitbox.location}")
hw = hitbox.size.width / 2
hh = hitbox.size.height / 2
cx = hitbox.coordinates.x + hw
cy = hitbox.coordinates.y + hh
acx = cx - (my_sprite.width / (scaling * 2))
acy = cy - (my_sprite.height / (scaling * 2))
# print(f"acx: {acx} acy: {acy} cx: {cx} cy: {cy} hh: {hh} hw: {hw}")
total_steps = 8
angles = [
step / total_steps * 2 * math.pi for step in range(total_steps)
]
for angle in angles:
x = hw * math.cos(angle) + acx
y = -(hh * math.sin(angle) + acy)
point = [x, y]
points.append(point)
# for point in points:
# print(f"({point[0]:.1f}, {point[1]:.1f}) ")
# print()
else:
print(f"Warning: Hitbox type {type(hitbox)} not supported.")
my_sprite.set_hit_box(points)
if tile.animation is not None:
# Animated image
key_frame_list = []
# Loop through each frame
for frame in tile.animation:
# Get the tile for the frame
frame_tile = _get_tile_by_id(map_object, tile.tileset, frame.tile_id)
if frame_tile:
image_file = _get_image_source(
frame_tile, base_directory, map_directory
)
# Does the tile have an image?
if frame_tile.image:
# Yes, use it
texture = load_texture(image_file)
else:
# No image for tile? Pull from tilesheet
image_x, image_y, width, height = _get_image_info_from_tileset(
frame_tile
)
texture = load_texture(image_file, image_x, image_y, width, height)
key_frame = AnimationKeyframe(frame.tile_id, frame.duration, texture)
key_frame_list.append(key_frame)
# If this is the first texture in the animation, go ahead and
# set it as the current texture.
if len(key_frame_list) == 1:
my_sprite.texture = key_frame.texture
# print(f"Add tile {frame.tile_id} for keyframe. Source: {frame_tile.image.source}")
cast(AnimatedTimeBasedSprite, my_sprite).frames = key_frame_list
return my_sprite
def _process_object_layer(
map_object: pytiled_parser.TiledMap,
layer: pytiled_parser.ObjectLayer,
scaling: float = 1,
base_directory: str = "",
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm="Simple",
hit_box_detail=4.5,
) -> SpriteList:
sprite_list: SpriteList = SpriteList(use_spatial_hash=use_spatial_hash)
for cur_object in layer.tiled_objects:
if not hasattr(cur_object, "gid"):
print(
"Warning: Currently only tiles (not objects) are supported in object layers."
)
continue
tile = _get_tile_by_gid(map_object, cur_object.gid)
my_sprite = _create_sprite_from_tile(
map_object,
tile,
scaling=scaling,
base_directory=base_directory,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
x = cur_object.coordinates.x * scaling
y = (
map_object.map_size.height * map_object.tile_size[1]
- cur_object.coordinates.y
) * scaling
my_sprite.width = width = cur_object.size[0] * scaling
my_sprite.height = height = cur_object.size[1] * scaling
center_x = width / 2
center_y = height / 2
if cur_object.rotation is not None:
rotation = -math.radians(cur_object.rotation)
else:
rotation = 0
cos_rotation = math.cos(rotation)
sin_rotation = math.sin(rotation)
rotated_center_x = center_x * cos_rotation - center_y * sin_rotation
rotated_center_y = center_x * sin_rotation + center_y * cos_rotation
my_sprite.position = (x + rotated_center_x, y + rotated_center_y)
my_sprite.angle = math.degrees(rotation)
# Opacity
opacity = layer.opacity
if opacity:
my_sprite.alpha = int(opacity * 255)
# Properties
if cur_object.properties is not None and "change_x" in cur_object.properties:
my_sprite.change_x = float(cur_object.properties["change_x"])
if cur_object.properties is not None and "change_y" in cur_object.properties:
my_sprite.change_y = float(cur_object.properties["change_y"])
if (
cur_object.properties is not None
and "boundary_bottom" in cur_object.properties
):
my_sprite.boundary_bottom = float(cur_object.properties["boundary_bottom"])
if (
cur_object.properties is not None
and "boundary_top" in cur_object.properties
):
my_sprite.boundary_top = float(cur_object.properties["boundary_top"])
if (
cur_object.properties is not None
and "boundary_left" in cur_object.properties
):
my_sprite.boundary_left = float(cur_object.properties["boundary_left"])
if (
cur_object.properties is not None
and "boundary_right" in cur_object.properties
):
my_sprite.boundary_right = float(cur_object.properties["boundary_right"])
if cur_object.properties is not None:
my_sprite.properties.update(cur_object.properties)
if cur_object.type:
my_sprite.properties["type"] = cur_object.type
if cur_object.name:
my_sprite.properties["name"] = cur_object.name
sprite_list.append(my_sprite)
return sprite_list
def _process_tile_layer(
map_object: pytiled_parser.TiledMap,
layer: pytiled_parser.TileLayer,
scaling: float = 1,
base_directory: str = "",
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm="Simple",
hit_box_detail: float = 4.5,
) -> SpriteList:
sprite_list: SpriteList = SpriteList(use_spatial_hash=use_spatial_hash)
map_array = layer.data
# Loop through the layer and add in the wall list
for row_index, row in enumerate(map_array):
for column_index, item in enumerate(row):
# Check for empty square
if item == 0:
continue
tile = _get_tile_by_gid(map_object, item)
if tile is None:
error_msg = (
f"Warning, couldn't find tile for item {item} in layer "
f"'{layer.name}' in file '{map_object.map_file}'."
)
raise ValueError(error_msg)
my_sprite = _create_sprite_from_tile(
map_object,
tile,
scaling=scaling,
base_directory=base_directory,
hit_box_algorithm=hit_box_algorithm,
hit_box_detail=hit_box_detail,
)
if my_sprite is None:
print(
f"Warning: Could not create sprite number {item} in layer '{layer.name}' {tile.image.source}"
)
else:
my_sprite.center_x = (
column_index * (map_object.tile_size[0] * scaling)
+ my_sprite.width / 2
)
my_sprite.center_y = (map_object.map_size.height - row_index - 1) * (
map_object.tile_size[1] * scaling
) + my_sprite.height / 2
opacity = layer.opacity
if opacity:
my_sprite.alpha = int(opacity * 255)
sprite_list.append(my_sprite)
return sprite_list
def process_layer(
map_object: pytiled_parser.TiledMap,
layer_name: str,
scaling: float = 1,
base_directory: str = "",
use_spatial_hash: Optional[bool] = None,
hit_box_algorithm="Simple",
hit_box_detail: float = 4.5,
) -> SpriteList:
if len(base_directory) > 0 and not base_directory.endswith("/"):
base_directory += "/"
layer = get_tilemap_layer(map_object, layer_name)
if layer is None:
print(f"Warning, no layer named '{layer_name}'.")
return SpriteList()
if isinstance(layer, pytiled_parser.TileLayer):
return _process_tile_layer(
map_object,
layer,
scaling,
base_directory,
use_spatial_hash,
hit_box_algorithm,
hit_box_detail,
)
elif isinstance(layer, pytiled_parser.ObjectLayer):
return _process_object_layer(
map_object,
layer,
scaling,
base_directory,
use_spatial_hash,
hit_box_algorithm,
hit_box_detail,
)
print(f"Warning, layer '{layer_name}' has unexpected type. '{type(layer)}'")
return SpriteList()
| true
| true
|
f705811f0f591f97ad3e4904b50ed90739fad929
| 98
|
py
|
Python
|
lab5_threshold_functions/sat/types.py
|
j-adamczyk/ADPTO_templates
|
e0a4e77ba8de21fe966388ccee66ef62224a2d99
|
[
"MIT"
] | null | null | null |
lab5_threshold_functions/sat/types.py
|
j-adamczyk/ADPTO_templates
|
e0a4e77ba8de21fe966388ccee66ef62224a2d99
|
[
"MIT"
] | null | null | null |
lab5_threshold_functions/sat/types.py
|
j-adamczyk/ADPTO_templates
|
e0a4e77ba8de21fe966388ccee66ef62224a2d99
|
[
"MIT"
] | 1
|
2022-03-25T07:25:26.000Z
|
2022-03-25T07:25:26.000Z
|
from typing import List, Set, Tuple
VertexSets = List[Set[int]]
EdgeList = List[Tuple[int, int]]
| 19.6
| 35
| 0.72449
|
from typing import List, Set, Tuple
VertexSets = List[Set[int]]
EdgeList = List[Tuple[int, int]]
| true
| true
|
f705813cf40811a24e6a3961328998417d3b7e4d
| 4,566
|
py
|
Python
|
main_app/tests/test_models.py
|
wszoltysek/give_things
|
240266460f0d7b7777cdaa8383edce80ea9e6024
|
[
"MIT"
] | null | null | null |
main_app/tests/test_models.py
|
wszoltysek/give_things
|
240266460f0d7b7777cdaa8383edce80ea9e6024
|
[
"MIT"
] | null | null | null |
main_app/tests/test_models.py
|
wszoltysek/give_things
|
240266460f0d7b7777cdaa8383edce80ea9e6024
|
[
"MIT"
] | null | null | null |
import pytest
from main_app.models import *
from main_app.tests.utils import *
# TESTS FOR CREATE MODELS:
@pytest.mark.django_db
def test_create_user():
# Given:
users_before = User.objects.count()
# When:
new_user = fake_user()
# Then:
assert User.objects.count() == users_before + 1
assert new_user.pk == 1
assert new_user.is_anonymous is False
@pytest.mark.django_db
def test_create_category():
# Given:
categories_before = Category.objects.count()
# When:
new_category = fake_category()
# Then:
assert Category.objects.count() == categories_before + 1
assert Category.objects.count() == 1
assert new_category.pk == 1
@pytest.mark.django_db
def test_create_institution():
# Given:
institutions_before = Institution.objects.count()
# When:
new_institution = fake_institution()
# Then:
assert Institution.objects.count() == institutions_before + 1
assert Institution.objects.count() == 1
assert new_institution.pk == 1
@pytest.mark.django_db
def test_create_donation():
# Given:
donations_before = Donation.objects.count()
# When:
new_donation = fake_donation()
# Then:
assert Donation.objects.count() == donations_before + 1
assert Donation.objects.count() == 1
assert new_donation.pk == 1
# TESTS FOR EDIT MODELS:
@pytest.mark.django_db
def test_edit_user():
# Given:
user = fake_user()
# When:
previous_user_name = user.username
user.username = "Charity"
# Then:
assert previous_user_name != user.username
assert user.username == "Charity"
@pytest.mark.django_db
def test_edit_category():
# Given:
category = fake_category()
# When:
previous_category_name = category.name
category.name = "Clothes"
# Then:
assert previous_category_name != category.name
assert category.name == "Clothes"
@pytest.mark.django_db
def test_edit_institution():
# Given:
institution = fake_institution()
# When:
previous_institution_name = institution.name
institution.name = "Fundacja"
previous_institution_description = institution.description
institution.description = "Some description"
previous_institution_pk = institution.pk
institution.pk = 2
# Then:
assert previous_institution_name != institution.name
assert institution.name == "Fundacja"
assert previous_institution_description != institution.description
assert institution.description == "Some description"
assert previous_institution_pk != institution.pk
assert institution.pk == 2
@pytest.mark.django_db
def test_edit_donation():
# Given:
donation = fake_donation()
# When:
previous_donation_city = donation.city
donation.city = "Katowice"
previous_donation_date = donation.pick_up_date
donation.pick_up_date = "2020-06-17"
previous_donation_comment = donation.pick_up_comment
donation.pick_up_comment = "Comment"
previous_donation_status = donation.collected
donation.collected = False
# Then:
assert previous_donation_city != donation.city
assert donation.city == "Katowice"
assert previous_donation_date != donation.pick_up_date
assert donation.pick_up_date == "2020-06-17"
assert previous_donation_comment != donation.pick_up_comment
assert donation.pick_up_comment == "Comment"
assert previous_donation_status != donation.collected
assert donation.collected is False
# TESTS FOR DELETE MODELS:
@pytest.mark.django_db
def test_delete_user():
# Given:
user = fake_user()
users_before_deletion = User.objects.count()
# When:
user.delete()
# Then:
assert User.objects.count() == users_before_deletion - 1
@pytest.mark.django_db
def test_delete_category():
# Given:
category = fake_category()
categories_before_deletion = Category.objects.count()
# When:
category.delete()
# Then:
assert Category.objects.count() == categories_before_deletion - 1
@pytest.mark.django_db
def test_delete_institution():
# Given:
institution = fake_institution()
institution_before_deletion = Institution.objects.count()
# When:
institution.delete()
# Then:
assert Institution.objects.count() == institution_before_deletion - 1
@pytest.mark.django_db
def test_delete_donation():
# Given:
donation = fake_donation()
donation_before_deletion = Donation.objects.count()
# When:
donation.delete()
# Then:
assert Donation.objects.count() == donation_before_deletion - 1
| 26.858824
| 73
| 0.708717
|
import pytest
from main_app.models import *
from main_app.tests.utils import *
@pytest.mark.django_db
def test_create_user():
users_before = User.objects.count()
new_user = fake_user()
assert User.objects.count() == users_before + 1
assert new_user.pk == 1
assert new_user.is_anonymous is False
@pytest.mark.django_db
def test_create_category():
categories_before = Category.objects.count()
new_category = fake_category()
assert Category.objects.count() == categories_before + 1
assert Category.objects.count() == 1
assert new_category.pk == 1
@pytest.mark.django_db
def test_create_institution():
institutions_before = Institution.objects.count()
new_institution = fake_institution()
assert Institution.objects.count() == institutions_before + 1
assert Institution.objects.count() == 1
assert new_institution.pk == 1
@pytest.mark.django_db
def test_create_donation():
donations_before = Donation.objects.count()
new_donation = fake_donation()
assert Donation.objects.count() == donations_before + 1
assert Donation.objects.count() == 1
assert new_donation.pk == 1
@pytest.mark.django_db
def test_edit_user():
user = fake_user()
previous_user_name = user.username
user.username = "Charity"
assert previous_user_name != user.username
assert user.username == "Charity"
@pytest.mark.django_db
def test_edit_category():
category = fake_category()
previous_category_name = category.name
category.name = "Clothes"
assert previous_category_name != category.name
assert category.name == "Clothes"
@pytest.mark.django_db
def test_edit_institution():
institution = fake_institution()
previous_institution_name = institution.name
institution.name = "Fundacja"
previous_institution_description = institution.description
institution.description = "Some description"
previous_institution_pk = institution.pk
institution.pk = 2
assert previous_institution_name != institution.name
assert institution.name == "Fundacja"
assert previous_institution_description != institution.description
assert institution.description == "Some description"
assert previous_institution_pk != institution.pk
assert institution.pk == 2
@pytest.mark.django_db
def test_edit_donation():
donation = fake_donation()
previous_donation_city = donation.city
donation.city = "Katowice"
previous_donation_date = donation.pick_up_date
donation.pick_up_date = "2020-06-17"
previous_donation_comment = donation.pick_up_comment
donation.pick_up_comment = "Comment"
previous_donation_status = donation.collected
donation.collected = False
assert previous_donation_city != donation.city
assert donation.city == "Katowice"
assert previous_donation_date != donation.pick_up_date
assert donation.pick_up_date == "2020-06-17"
assert previous_donation_comment != donation.pick_up_comment
assert donation.pick_up_comment == "Comment"
assert previous_donation_status != donation.collected
assert donation.collected is False
@pytest.mark.django_db
def test_delete_user():
user = fake_user()
users_before_deletion = User.objects.count()
user.delete()
assert User.objects.count() == users_before_deletion - 1
@pytest.mark.django_db
def test_delete_category():
category = fake_category()
categories_before_deletion = Category.objects.count()
category.delete()
assert Category.objects.count() == categories_before_deletion - 1
@pytest.mark.django_db
def test_delete_institution():
institution = fake_institution()
institution_before_deletion = Institution.objects.count()
institution.delete()
assert Institution.objects.count() == institution_before_deletion - 1
@pytest.mark.django_db
def test_delete_donation():
donation = fake_donation()
donation_before_deletion = Donation.objects.count()
donation.delete()
assert Donation.objects.count() == donation_before_deletion - 1
| true
| true
|
f70581a8075af7680223c94e5ae62ce648e7287c
| 1,866
|
py
|
Python
|
homeassistant/components/rpi_power/binary_sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/rpi_power/binary_sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/rpi_power/binary_sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""
A sensor platform which detects underruns and capped status from the official Raspberry Pi Kernel.
The minimal kernel version needed is 4.14+.
"""
import logging
from rpi_bad_power import UnderVoltage, new_under_voltage
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
_LOGGER = logging.getLogger(__name__)
DESCRIPTION_NORMALIZED = "Voltage normalized. Everything is working as intended."
DESCRIPTION_UNDER_VOLTAGE = "Under-voltage was detected. Consider getting a uninterruptible power supply for your Raspberry Pi."
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up rpi_power binary sensor."""
under_voltage = await hass.async_add_executor_job(new_under_voltage)
async_add_entities([RaspberryChargerBinarySensor(under_voltage)], True)
class RaspberryChargerBinarySensor(BinarySensorEntity):
"""Binary sensor representing the rpi power status."""
_attr_device_class = BinarySensorDeviceClass.PROBLEM
_attr_icon = "mdi:raspberry-pi"
_attr_name = "RPi Power status"
_attr_unique_id = "rpi_power" # only one sensor possible
def __init__(self, under_voltage: UnderVoltage) -> None:
"""Initialize the binary sensor."""
self._under_voltage = under_voltage
def update(self) -> None:
"""Update the state."""
value = self._under_voltage.get()
if self._attr_is_on != value:
if value:
_LOGGER.warning(DESCRIPTION_UNDER_VOLTAGE)
else:
_LOGGER.info(DESCRIPTION_NORMALIZED)
self._attr_is_on = value
| 33.927273
| 128
| 0.744373
|
import logging
from rpi_bad_power import UnderVoltage, new_under_voltage
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
_LOGGER = logging.getLogger(__name__)
DESCRIPTION_NORMALIZED = "Voltage normalized. Everything is working as intended."
DESCRIPTION_UNDER_VOLTAGE = "Under-voltage was detected. Consider getting a uninterruptible power supply for your Raspberry Pi."
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
under_voltage = await hass.async_add_executor_job(new_under_voltage)
async_add_entities([RaspberryChargerBinarySensor(under_voltage)], True)
class RaspberryChargerBinarySensor(BinarySensorEntity):
_attr_device_class = BinarySensorDeviceClass.PROBLEM
_attr_icon = "mdi:raspberry-pi"
_attr_name = "RPi Power status"
_attr_unique_id = "rpi_power"
def __init__(self, under_voltage: UnderVoltage) -> None:
self._under_voltage = under_voltage
def update(self) -> None:
value = self._under_voltage.get()
if self._attr_is_on != value:
if value:
_LOGGER.warning(DESCRIPTION_UNDER_VOLTAGE)
else:
_LOGGER.info(DESCRIPTION_NORMALIZED)
self._attr_is_on = value
| true
| true
|
f70582e58b5ad8dd04398cbdb1c24db03fe3139a
| 1,746
|
py
|
Python
|
audiostream.py
|
ITNano/soundserver
|
b84cbfd821987ad8af72a6c2677caa0b949abff6
|
[
"MIT"
] | null | null | null |
audiostream.py
|
ITNano/soundserver
|
b84cbfd821987ad8af72a6c2677caa0b949abff6
|
[
"MIT"
] | null | null | null |
audiostream.py
|
ITNano/soundserver
|
b84cbfd821987ad8af72a6c2677caa0b949abff6
|
[
"MIT"
] | null | null | null |
import numpy
import wave
class Audiostream(object):
def __init__(self, volume_prio=1):
self.volume_prio = volume_prio
def get_data(self, frame_count, channels, width, rate):
return "".join(["\x00"]*frames*self.channels*self.width)
def get_volume_priority(self):
return self.volume_prio
class WaveAudioStream(Audiostream):
def __init__(self, file, volume_prio=1):
Audiostream.__init__(self, volume_prio)
self.wf = wave.open(file)
def get_data(self, frame_count, channels, width, rate, format):
data = self.wf.readframes(frame_count)
if len(data) > 0:
return numpy.fromstring(data, format)
else:
return None
class FeedAudioStream(Audiostream):
def __init__(self, keep_open=False, volume_prio=1):
Audiostream.__init__(self, volume_prio)
self.keep_open = keep_open
self.closed = False
self.data = []
self.offset = 0
def feed(self, data):
if self.closed:
print("WARNING: Trying to add data to a closed stream.")
self.data.append(data)
def clean(self):
self.data = self.data[self.offset:]
self.offset = 0
def get_data(self, frame_count, channels, width, rate, format):
size = min(len(self.data)-self.offset, frame_count*channels)
if size == 0 and not self.keep_open:
self.closed = True
return None
data = numpy.array(self.data[self.offset:self.offset+size])
self.offset += size
if self.offset > rate:
self.clean()
return data
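# Minimal usage sketch (added; the fed sample values are placeholders -- real
# callers would feed decoded PCM samples one value at a time).
if __name__ == "__main__":
    stream = FeedAudioStream(keep_open=True)
    stream.feed(0)
    stream.feed(1)
    # One frame of 2-channel audio -> a numpy array holding the two samples.
    print(stream.get_data(1, channels=2, width=2, rate=44100, format="int16"))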
| 30.103448
| 68
| 0.580756
|
import numpy
import wave
class Audiostream(object):
def __init__(self, volume_prio=1):
self.volume_prio = volume_prio
def get_data(self, frame_count, channels, width, rate):
return "".join(["\x00"]*frames*self.channels*self.width)
def get_volume_priority(self):
return self.volume_prio
class WaveAudioStream(Audiostream):
def __init__(self, file, volume_prio=1):
Audiostream.__init__(self, volume_prio)
self.wf = wave.open(file)
def get_data(self, frame_count, channels, width, rate, format):
data = self.wf.readframes(frame_count)
if len(data) > 0:
return numpy.fromstring(data, format)
else:
return None
class FeedAudioStream(Audiostream):
def __init__(self, keep_open=False, volume_prio=1):
Audiostream.__init__(self, volume_prio)
self.keep_open = keep_open
self.closed = False
self.data = []
self.offset = 0
def feed(self, data):
if self.closed:
print("WARNING: Trying to add data to a closed stream.")
self.data.append(data)
def clean(self):
self.data = self.data[self.offset:]
self.offset = 0
def get_data(self, frame_count, channels, width, rate, format):
size = min(len(self.data)-self.offset, frame_count*channels)
if size == 0 and not self.keep_open:
self.closed = True
return None
data = numpy.array(self.data[self.offset:self.offset+size])
self.offset += size
if self.offset > rate:
self.clean()
return data
| true
| true
|
f7058455308d91038b90e873d4f6c9da997ca842
| 4,206
|
py
|
Python
|
paypalpayoutssdk/payouts/payouts_item_get_request.py
|
truthiswill/Payouts-Python-SDK
|
ba04ffafb8165a1b7cdfd5841f08a96dccdd190b
|
[
"BSD-Source-Code"
] | 23
|
2020-03-02T13:31:55.000Z
|
2022-03-06T11:25:21.000Z
|
paypalpayoutssdk/payouts/payouts_item_get_request.py
|
truthiswill/Payouts-Python-SDK
|
ba04ffafb8165a1b7cdfd5841f08a96dccdd190b
|
[
"BSD-Source-Code"
] | 4
|
2020-09-26T08:40:26.000Z
|
2022-03-01T17:29:51.000Z
|
paypalpayoutssdk/payouts/payouts_item_get_request.py
|
truthiswill/Payouts-Python-SDK
|
ba04ffafb8165a1b7cdfd5841f08a96dccdd190b
|
[
"BSD-Source-Code"
] | 21
|
2020-02-07T10:02:57.000Z
|
2021-09-09T18:05:02.000Z
|
# This class was generated on Mon, 23 Dec 2019 12:39:22 IST by version 0.1.0-dev+904328-dirty of Braintree SDK Generator
# payouts_item_get_request.py
# @version 0.1.0-dev+904328-dirty
# @type request
# @data H4sIAAAAAAAC/+xb63PbuBH/3r9ih9eZnGcoyXe5p795bF/j9pq4sZNOx/VYELkSUYMADwta4WTyv3fwIM2XHMdxdH3ok61dPPa3u1gsFuD76CXLMTqIClap0tCEG8ynKzRRHB0jJZoXhisZHUTnmVoTpGgYFwRLpYGB7wS2TwyLCk6Pp3AI/yz3958niUrR/Ye+1bVtdc1Tz53dsSFDURBUqgSeojR8WUGKkmNqx89RGprC6dJPZ38Cp9Agdr0SJqEkBJPhJ89t1N2ktn89Bd6iBL4EbkCw5IaADYY2mkliiVXP2NDTKI7+VqKuzphmORrUFB1cXsXRC2Qp6j71F6XzPu2MmaxDex9dVIW1FhnN5SqKo7dMc7YQ2LFiDTeKo79gFVgDg15kCKfHoJY18NqUzrjrjCeZVQ9lal2b3WI61JpVXoz9OHqNLH0lRRUdLJkgtITfSq4xjQ6MLjGOzrQqUBuOFB3IUogPV74NkvGDWKIlUaEkoac1OM+8VKdWqrrJEPXHcbbRkWGmJGAyBWUy1A24neu2XPfBZg6Etp0/7qd27ltuqr6TdulDO5aS/1YinLHqjInJCiVqZjCFROW5ktadTcYMCC5vyCmFUKaoJ87aGhPkt6gnxFOElg5oCm8IU+f3RrPkhsvV5+rA+Xrcd+SjUmuUSQWJkreoySIbKCcJja47je6UNM4fKisZTgYaSZU62YKJ8V2SMbnCa2uhjvx9zlDyugXYFt6knIAVheC1mTJOYVU/saVqEw0RLbXKr1muSmk6eLr0e+xgndC3C/vnkksmE85E2xtjoDLJgNmFu2CCyQRB6WZ5p+UWjJfcKWHod6MgL02mESdJxjRLDGo4PX81+e7bb35su2GKV1/PUpXQjEuDK83sALOUa0zMTCOZWd14YhvTbO8pdpz4o3BvmSi7PlpThkAdJw77Y85XmYEFHvgYWgofQP0vwf2vQwkOLmpn9gCRI4HgNwjzP5/9Y+6dnGkEqQyYquAJE6KCpfZOwcQ0hOl61N4ckGLCcyaaHuNzXbw8bs1F5SLltzzF1EqowGSqJCZTk9H4dLMa4S9uDSLooHyQZb5AbbOJWpBCsATrTLHjBDEQIlwetWkEE+syYF3m6uvMmIIOZrP1ej3lpKZKr2aclHOoSddHppnJxZP4ydWDooBRYzGgTd1FgF0E2EWA/84IcDXM2VBrpUfyG0umbmJTk0YyGst61DHqUas3xUW56ifXLeJQQJ9SOzfVkgl3lG+S4kRpjcKtVShKXShCeuK0axMSLpdK527qa5vWdxCNMIfI3rw+dSC88p2jN73ucklnoO3EmhyJ2Kobbe5oQwCB5xdt6tgL9MebLUot7Z+2yIGw8ZCWlTmTE40staOAbR7U/UXkDqv1xC20Y7/QNhZI3PzXadPqDtWGBl2UhxKYFdwGO5am3Mfnexf5D0+/yJccRXeF15ShTZz2Q6nHtfLelLDSrvHHmeQzFjVRb7etKUPJNTJqFuqXEfNXLm+gPfNAYFdQ6AhcUzY7hvZVromLnJjC5YvDi5NXh+e+OlGnQazgM41LtPsZ2l8THepcNPsqYwYVo4nrsfflPSrTuOygDISRjFblhUB7OGd6hQbevP51ChcKcnaDIS3wqG0SE9vmCy49J0eTqRTW3GQ+9F7aCH2BeWF7THxsNpje5QBGKUFTjmbp0gC73c/0Mvnh+x/395w2p2CzkULjpNAqQSIuVzFwmYgy9ZPO/ziPYf71PHYp+HxvDk2eSlOwiOYW6xy4D6w3WEFtL4tVSZt5t4pLrFGBx+jxMJtQkTW8NI68pQXlddrbUQJpaLwXFxdntRma9M1sMN6WEGgUHfH975FThlU/NNmIqQr8qKN8//NPP31F6JLiyXd7depOqG+R3DHLlQ59nurGd4YuJcsXfFWqkkTV23gJcyYNT6gOqt4Nz21W68LJ6yAh9VJZJpmTjRHxlXQl4pntO6kh9X9O3316ervJDFe9DHcX+HaBbxf4doHv/yDwxQ+9O10wk2T9o/OQt+kE3bqUCppt3a66/l/k2ONuSP2p595b4TFUgT5E1L3sD1dpiUaHLkAyyrK4XAm7ehJecJTmMyFePqAMO1KD/Y8twN6HcFd/3dVfd/XXTwjTUvWukwNh6CchYFGBCV9yqyFlfAlKKsOXPAn7DJyXRaG0ISgLG8++29/fh8Pzo9PTVq7kIsc3liOVnPS5W8sV/AuGXsLQEMeqJ57ZeuZjQ9gUjpT2+XZK4PwOYd7E72ubXcz9QgMuGz9DMk8bDl76GuIQZpBjUHQcsO4vdRVMm2oLz1mEq5obvF6WQgylHud3RT8+OXt9cnR4cXLs03In+jOCpq8DNoUjJmGBdmMqiUskcvQYJE9u/H92ucsqPG5y2gi1Pmkj2wKBCsGNjzdLrsnEIBiZMH6tClhnKO+U6B5gNHNuyd/HtblZh38fEblATXZHNy2d2hE83O3gWPFblEMgHfLjkLghnMmDJbeIKudpKnAIq0t/HC4/RnDJN4TABCn3HNEojZCXwvBCYLsdhXMvl6swlNFKVjlPvH6YXQ/PKO6MvR1NFRqX/F03965JI48VHcsJbbgRGNcBulbOFiWnctmXvCGN7LiO1Tr1/A7y6oE/3tEe54uhv3einIsquOWhdckbqdbSnhTcWfiBYdTPcq9jh0n7Tp13nTpXtVOH9ls7LXdzhQ17dGCNvju2vPC0z79i5aHCUJ+y6tRle7lVkHrNhECzAVLDHILyLGtwXVeTQq8aC7WKAVPwR3eXfr1Fmau79lRn9cDgmc1an4EX259f4c055GrBBUKRKYkh9d/WEnNp9egb8wGrVyQepuSnx7XwcKHda+X2Y3QJTAJLEntet+ipIoP5k/v45ipD+w32EnFT9STwdg+/doWHXeHhf7XwcM+3NQPW6I7XKwa7L63gLSe+EOjThEqVne9tOp9ZbWc9hBA+WgUf8h4Q3z8lsMN5pkqR2jNqeES05ibjEp7vQ8qqbR07Dc/xOlyoYVcFA9ZI1dye1W28t23r9I+T//hozchnic0YsdXG5al7d4em19vfCX70nuf58+c/N/c8309/2NuWojrfEXUV1Wd92oVJq/vvgMV/IrYRT8MeYnJGbrUNX5s9VcoSHSlpUIZP5yL3RYwvY87+Re7+/oUxxV/9DehB9KeTi8h/RhgdRLPbb2b1V2mz9uees/fd+PUhiqPzG140kpy8KzAxmJ47LEcqxejg2/39D3/4NwAAAP//
# DO NOT EDIT
import paypalhttp
try:
from urllib import quote # Python 2.X
except ImportError:
from urllib.parse import quote # Python 3+
class PayoutsItemGetRequest:
"""
Shows details for a payout item, by ID. A <code>payout_item_id</code> helps you identify denied payments. If a payment is denied, you can use the <code>payout_item_id</code> to identify the payment even if it lacks a <code>transaction_id</code>.
"""
def __init__(self, payout_item_id):
self.verb = "GET"
self.path = "/v1/payments/payouts-item/{payout_item_id}?".replace("{payout_item_id}", quote(str(payout_item_id)))
self.headers = {}
self.headers["Content-Type"] = "application/json"
self.body = None
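# Usage sketch (added; the payout item id below is a made-up placeholder).  The
# request object only prepares the HTTP verb, path and headers -- it would
# normally be handed to the SDK's HTTP client for execution.
if __name__ == "__main__":
    request = PayoutsItemGetRequest("8AELMXH8UB2P8")
    print(request.verb)  # GET
    print(request.path)  # /v1/payments/payouts-item/8AELMXH8UB2P8?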
| 161.769231
| 3,256
| 0.906087
|
import paypalhttp
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
class PayoutsItemGetRequest:
def __init__(self, payout_item_id):
self.verb = "GET"
self.path = "/v1/payments/payouts-item/{payout_item_id}?".replace("{payout_item_id}", quote(str(payout_item_id)))
self.headers = {}
self.headers["Content-Type"] = "application/json"
self.body = None
| true
| true
|
f70586c1efc45321eec56c33cb0c96e78f531a4d
| 7,859
|
py
|
Python
|
benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/10-19_13.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/10-19_13.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/10-19_13.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
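# Added note: the transition system above encodes, roughly, the following loop,
# with pc = -1 acting as the exit location and the fairness constraint asking
# for an infinite run that never reaches it:
#     assume y >= 1
#     assume z >= 1
#     while x >= 0:
#         x = y * z - 1
#         y = y + 1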
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc0 = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, i_1)))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_0))
loc0.set_progress(1, mgr.Equals(x_z, z))
loc1 = Location(env, mgr.GE(z, i_0))
loc1.set_progress(0, mgr.Equals(x_z, mgr.Plus(z, i_3)))
h_z = Hint("h_z4", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.GE(z, i_3))
loc0.set_progress(0, mgr.GT(x_z, z))
h_z = Hint("h_z1", env, frozenset([z]), symbs)
h_z.set_locs([loc0])
res.append(h_z)
loc = Location(env, mgr.LE(z, i_0))
loc.set_progress(0, mgr.Equals(x_z, z))
h_z = Hint("h_z0", env, frozenset([z]), symbs)
h_z.set_locs([loc])
res.append(h_z)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(z, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, z)))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_z, y))
loc1 = Location(env, mgr.GE(z, i_0), mgr.GE(x, i_3))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, x)))
h_z = Hint("h_z3", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
return frozenset(res)
| 34.169565
| 81
| 0.529457
|
from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = mgr.And(
mgr.Implies(pcend, x_pcend),
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
mgr.Implies(pcs[3], x_pcs[4]),
mgr.Implies(pcs[4], x_pcs[2]))
labels = mgr.And(
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
trans = mgr.And(cfg, labels)
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc0 = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, i_1)))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_0))
loc0.set_progress(1, mgr.Equals(x_z, z))
loc1 = Location(env, mgr.GE(z, i_0))
loc1.set_progress(0, mgr.Equals(x_z, mgr.Plus(z, i_3)))
h_z = Hint("h_z4", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.GE(z, i_3))
loc0.set_progress(0, mgr.GT(x_z, z))
h_z = Hint("h_z1", env, frozenset([z]), symbs)
h_z.set_locs([loc0])
res.append(h_z)
loc = Location(env, mgr.LE(z, i_0))
loc.set_progress(0, mgr.Equals(x_z, z))
h_z = Hint("h_z0", env, frozenset([z]), symbs)
h_z.set_locs([loc])
res.append(h_z)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(z, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, z)))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_z, y))
loc1 = Location(env, mgr.GE(z, i_0), mgr.GE(x, i_3))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, x)))
h_z = Hint("h_z3", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
return frozenset(res)
| true
| true
|
f705874db830f4f002d426666ec7d088d9bad4bc
| 2,291
|
py
|
Python
|
pages/models.py
|
allenamusin/metadata-verifier
|
7b2c61c231c49c722d1db9c9e83f157b6e2439f4
|
[
"MIT"
] | null | null | null |
pages/models.py
|
allenamusin/metadata-verifier
|
7b2c61c231c49c722d1db9c9e83f157b6e2439f4
|
[
"MIT"
] | 11
|
2020-02-12T03:26:35.000Z
|
2022-02-10T12:01:00.000Z
|
pages/models.py
|
allenamusin/metadata-verifier
|
7b2c61c231c49c722d1db9c9e83f157b6e2439f4
|
[
"MIT"
] | null | null | null |
import requests
from django.db import models
from django.utils import timezone
from users.models import CustomUser
from datetime import datetime
def get_coordinate(gps, ref):
coordinate = gps[0] + gps[1]/60 + gps[2]/3600
    # 'S' (south) and 'W' (west) references are negative in decimal degrees
    if ref in ('S', 'W'):
        coordinate = -coordinate
return coordinate
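# Worked example (added): EXIF GPS tags store degrees/minutes/seconds, so
# gps = (11, 30, 0) gives 11 + 30/60 + 0/3600 = 11.5 decimal degrees; a ref of
# 'S' or 'W' flips the sign to -11.5.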
def get_timestamp(timestamp_string):
datetime_object = datetime.strptime(timestamp_string, '%Y:%m:%d %H:%M:%S')
return datetime_object
class Photo(models.Model):
name = models.CharField(max_length=120)
lat = models.DecimalField(max_digits=9, decimal_places=6)
lon = models.DecimalField(max_digits=9, decimal_places=6)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
airspace_name = models.CharField(max_length=120, default='')
airspace_class = models.CharField(max_length=120, default='G')
def save_many(photos, user):
for photo in photos:
name = photo['ImageDescription']
lat = get_coordinate(photo['GPSLatitude'], photo['GPSLatitudeRef'])
lon = get_coordinate(photo['GPSLongitude'], photo['GPSLongitudeRef'])
timestamp = get_timestamp(photo['DateTimeOriginal'])
            t = requests.post(
                'http://airspace-service.herokuapp.com/geo/getAirspace',
                data={"longitude": lon, "latitude": lat}
            )
            airspace_data = t.json()
            airspace_name = airspace_data['name']
            airspace_class = airspace_data['class']
photo_model = Photo(
name=name,
lat=lat,
lon=lon,
timestamp=timestamp,
user=user,
airspace_name=airspace_name,
airspace_class=airspace_class
)
photo_model.save()
def get_all(user):
return Photo.objects.filter(user=user).values(
'id',
'name',
'lat',
'lon',
'timestamp',
'airspace_name',
'airspace_class'
)
def delete_all(user):
return Photo.objects.filter(user=user).delete()
def delete_one(user,id):
return Photo.objects.filter(user=user,id=id).delete()
| 34.712121
| 81
| 0.611523
|
import requests
from django.db import models
from django.utils import timezone
from users.models import CustomUser
from datetime import datetime
def get_coordinate(gps, ref):
coordinate = gps[0] + gps[1]/60 + gps[2]/3600
    if ref in ('S', 'W'):
        coordinate = -coordinate
return coordinate
def get_timestamp(timestamp_string):
datetime_object = datetime.strptime(timestamp_string, '%Y:%m:%d %H:%M:%S')
return datetime_object
class Photo(models.Model):
name = models.CharField(max_length=120)
lat = models.DecimalField(max_digits=9, decimal_places=6)
lon = models.DecimalField(max_digits=9, decimal_places=6)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
airspace_name = models.CharField(max_length=120, default='')
airspace_class = models.CharField(max_length=120, default='G')
def save_many(photos, user):
for photo in photos:
name = photo['ImageDescription']
lat = get_coordinate(photo['GPSLatitude'], photo['GPSLatitudeRef'])
lon = get_coordinate(photo['GPSLongitude'], photo['GPSLongitudeRef'])
timestamp = get_timestamp(photo['DateTimeOriginal'])
            t = requests.post(
                'http://airspace-service.herokuapp.com/geo/getAirspace',
                data={"longitude": lon, "latitude": lat}
            )
            airspace_data = t.json()
            airspace_name = airspace_data['name']
            airspace_class = airspace_data['class']
photo_model = Photo(
name=name,
lat=lat,
lon=lon,
timestamp=timestamp,
user=user,
airspace_name=airspace_name,
airspace_class=airspace_class
)
photo_model.save()
def get_all(user):
return Photo.objects.filter(user=user).values(
'id',
'name',
'lat',
'lon',
'timestamp',
'airspace_name',
'airspace_class'
)
def delete_all(user):
return Photo.objects.filter(user=user).delete()
def delete_one(user,id):
return Photo.objects.filter(user=user,id=id).delete()
| true
| true
|
f705892d7832be8bc0c55823b07c8570e5d2852f
| 442
|
py
|
Python
|
src/container.py
|
sudeep0901/python
|
7a50af12e72d21ca4cad7f2afa4c6f929552043f
|
[
"MIT"
] | null | null | null |
src/container.py
|
sudeep0901/python
|
7a50af12e72d21ca4cad7f2afa4c6f929552043f
|
[
"MIT"
] | 3
|
2019-12-26T05:13:55.000Z
|
2020-03-07T06:59:56.000Z
|
src/container.py
|
sudeep0901/python
|
7a50af12e72d21ca4cad7f2afa4c6f929552043f
|
[
"MIT"
] | null | null | null |
a = [1, "a"]
print(list)
print(dir(list))
list = [1, "a"]
print(dir(list))
tuple = ("a", "b")
print(list)
print(tuple)
dictn = {"key": "dictionary",
"d" :a}
print(dictn)
def factorial(n):
"Factorial calculation string document string"
# print("Calculating factorial of ", n)
if n <= 1: return 1
else: return n * factorial(n - 1)
print(factorial(100))
# printing document string
print(factorial.__doc__)
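# --- Editor's illustrative sketch (assumption, not part of the original file) ---
# The recursive factorial above makes one call per decrement, so a very large n
# could hit Python's default recursion limit (about 1000). An iterative version
# gives the same result without recursion:
def factorial_iterative(n):
    "Iterative factorial, equivalent to the recursive factorial above"
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result

print(factorial_iterative(100) == factorial(100))  # True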
| 14.733333
| 50
| 0.615385
|
a = [1, "a"]
print(list)
print(dir(list))
list = [1, "a"]
print(dir(list))
tuple = ("a", "b")
print(list)
print(tuple)
dictn = {"key": "dictionary",
"d" :a}
print(dictn)
def factorial(n):
if n <= 1: return 1
else: return n * factorial(n - 1)
print(factorial(100))
print(factorial.__doc__)
| true
| true
|
f7058b34f1196160d71f990667c61db4148e381e
| 3,132
|
py
|
Python
|
pype/plugins/maya/publish/extract_animation.py
|
kalisp/pype
|
28bbffaf2d12ccee48313cd9985e8dfa05e81a5c
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/extract_animation.py
|
kalisp/pype
|
28bbffaf2d12ccee48313cd9985e8dfa05e81a5c
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/extract_animation.py
|
kalisp/pype
|
28bbffaf2d12ccee48313cd9985e8dfa05e81a5c
|
[
"MIT"
] | null | null | null |
import os
from maya import cmds
import avalon.maya
import pype.api
from pype.hosts.maya.lib import extract_alembic
class ExtractAnimation(pype.api.Extractor):
"""Produce an alembic of just point positions and normals.
Positions and normals, uvs, creases are preserved, but nothing more,
for plain and predictable point caches.
"""
label = "Extract Animation"
hosts = ["maya"]
families = ["animation"]
def process(self, instance):
# Collect the out set nodes
out_sets = [node for node in instance if node.endswith("out_SET")]
if len(out_sets) != 1:
raise RuntimeError("Couldn't find exactly one out_SET: "
"{0}".format(out_sets))
out_set = out_sets[0]
roots = cmds.sets(out_set, query=True)
# Include all descendants
nodes = roots + cmds.listRelatives(roots,
allDescendents=True,
fullPath=True) or []
# Collect the start and end including handles
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
handles = instance.data.get("handles", 0) or 0
if handles:
start -= handles
end += handles
self.log.info("Extracting animation..")
dirname = self.staging_dir(instance)
parent_dir = self.staging_dir(instance)
filename = "{name}.abc".format(**instance.data)
path = os.path.join(parent_dir, filename)
options = {
"step": instance.data.get("step", 1.0) or 1.0,
"attr": ["cbId"],
"writeVisibility": True,
"writeCreases": True,
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True),
"writeColorSets": instance.data.get("writeColorSets", False)
}
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = roots
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
with avalon.maya.suspended_refresh():
with avalon.maya.maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(file=path,
startFrame=float(start),
endFrame=float(end),
**options)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": dirname,
}
instance.data["representations"].append(representation)
self.log.info("Extracted {} to {}".format(instance, dirname))
| 33.677419
| 77
| 0.559387
|
import os
from maya import cmds
import avalon.maya
import pype.api
from pype.hosts.maya.lib import extract_alembic
class ExtractAnimation(pype.api.Extractor):
label = "Extract Animation"
hosts = ["maya"]
families = ["animation"]
def process(self, instance):
out_sets = [node for node in instance if node.endswith("out_SET")]
if len(out_sets) != 1:
raise RuntimeError("Couldn't find exactly one out_SET: "
"{0}".format(out_sets))
out_set = out_sets[0]
roots = cmds.sets(out_set, query=True)
# Include all descendants
nodes = roots + cmds.listRelatives(roots,
allDescendents=True,
fullPath=True) or []
# Collect the start and end including handles
start = instance.data["frameStart"]
end = instance.data["frameEnd"]
handles = instance.data.get("handles", 0) or 0
if handles:
start -= handles
end += handles
self.log.info("Extracting animation..")
dirname = self.staging_dir(instance)
parent_dir = self.staging_dir(instance)
filename = "{name}.abc".format(**instance.data)
path = os.path.join(parent_dir, filename)
options = {
"step": instance.data.get("step", 1.0) or 1.0,
"attr": ["cbId"],
"writeVisibility": True,
"writeCreases": True,
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True),
"writeColorSets": instance.data.get("writeColorSets", False)
}
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
options["root"] = roots
if int(cmds.about(version=True)) >= 2017:
options["writeUVSets"] = True
with avalon.maya.suspended_refresh():
with avalon.maya.maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(file=path,
startFrame=float(start),
endFrame=float(end),
**options)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": dirname,
}
instance.data["representations"].append(representation)
self.log.info("Extracted {} to {}".format(instance, dirname))
| true
| true
|
f7058bd93bd1ec52a35d57cf42075a5bb3de9861
| 14,085
|
py
|
Python
|
passerine/db/session.py
|
shiroyuki/passerine
|
6e50ca4a8892da51af68561ac01601bfe8c9fd9c
|
[
"MIT"
] | null | null | null |
passerine/db/session.py
|
shiroyuki/passerine
|
6e50ca4a8892da51af68561ac01601bfe8c9fd9c
|
[
"MIT"
] | 1
|
2017-03-11T12:15:55.000Z
|
2017-03-11T12:15:55.000Z
|
passerine/db/session.py
|
shiroyuki/passerine
|
6e50ca4a8892da51af68561ac01601bfe8c9fd9c
|
[
"MIT"
] | null | null | null |
import re
from passerine.db.common import ProxyObject, ProxyFactory, ProxyCollection
from passerine.db.repository import Repository
from passerine.db.entity import get_relational_map
from passerine.db.exception import IntegrityConstraintError, UnsupportedRepositoryReferenceError
from passerine.db.mapper import AssociationType
from passerine.db.metadata.entity import EntityMetadata
from passerine.db.metadata.helper import EntityMetadataHelper
from passerine.db.uow import UnitOfWork
from passerine.graph import DependencyNode, DependencyManager
class QueryIteration(DependencyNode):
def __init__(self, join_config, alias, parent_alias, property_path):
super(QueryIteration, self).__init__()
self._join_config = join_config
self._alias = alias
self._parent_alias = parent_alias
self._property_path = property_path
@property
def join_config(self):
return self._join_config
@property
def alias(self):
return self._alias
@property
def parent_alias(self):
return self._parent_alias
@property
def property_path(self):
return self._property_path
def to_dict(self):
return {
'property_path': self.property_path,
'parent_alias': self.parent_alias,
'alias': self.alias,
'join_config': self.join_config,
'adjacent_nodes':self.adjacent_nodes
}
def __repr__(self):
return str('{}({})'.format(self.__class__.__name__, self.to_dict()))
class Session(object):
""" Database Session
:param database_name: the database name
:param driver: the driver API
"""
def __init__(self, driver):
self._driver = driver
self._uow = UnitOfWork(self)
self._repository_map = {}
self._registered_types = {}
        self._re_property_path_delimiter = re.compile(r'\.')
@property
def driver(self):
return self._driver
def collection(self, entity_class):
""" Alias to ``repository()``
.. deprecated:: 2.2
"""
return self.repository(entity_class)
def repositories(self):
""" Retrieve the list of collections
:rtype: list
"""
return [self._repository_map[key] for key in self._repository_map]
def repository(self, reference):
""" Retrieve the collection
:param reference: the entity class or entity metadata of the target repository / collection
:rtype: passerine.db.repository.Repository
"""
key = None
if isinstance(reference, EntityMetadata):
key = reference.collection_name
elif EntityMetadataHelper.hasMetadata(reference):
is_registerable_reference = True
metadata = EntityMetadataHelper.extract(reference)
key = metadata.collection_name
self.register_class(reference)
if not key:
raise UnsupportedRepositoryReferenceError('Either a class with metadata or an entity metadata is supported.')
if key not in self._repository_map:
repository = Repository(
session = self,
representing_class = reference
)
repository.setup_index()
self._repository_map[key] = repository
return self._repository_map[key]
def register_class(self, entity_class):
""" Register the entity class
:param type entity_class: the class of document/entity
:rtype: passerine.db.repository.Repository
.. note::
This is for internal operation only. As it seems to be just a
residual from the prototype stage, the follow-up investigation
in order to remove the method will be for Tori 3.1.
"""
key = entity_class
if isinstance(entity_class, type):
metadata = EntityMetadataHelper.extract(entity_class)
key = metadata.collection_name
if key not in self._registered_types:
self._registered_types[key] = entity_class
def query(self, query):
""" Query the data
:param passerine.db.query.Query query: the query object
:return: the list of matched entities
:rtype: list
"""
metadata = EntityMetadataHelper.extract(query.origin)
# Deprecated in Tori 3.1; Only for backward compatibility
if not query.is_new_style:
return self.driver.query(
metadata,
query._condition,
self.driver.dialect.get_iterating_constrains(query)
)
root_class = query.origin
expression_set = query.criteria.get_analyzed_version()
# Register the root entity
query.join_map[query.alias] = {
'alias': query.alias,
'path': None,
'class': root_class,
'parent_alias': None,
'property_path': None,
'result_list': []
}
self._update_join_map(metadata, query.join_map, query.alias)
iterating_sequence = self._compute_iterating_sequence(query.join_map)
alias_to_query_map = self.driver.dialect.get_alias_to_native_query_map(query)
for iteration in iterating_sequence:
if not self._sub_query(query, alias_to_query_map, iteration):
break
return query.join_map[query.alias]['result_list']
def _sub_query(self, query, alias_to_query_map, iteration):
is_join_query = True
alias = iteration.alias
if alias not in alias_to_query_map:
return False
join_config = query.join_map[alias]
joined_type = join_config['class']
joined_meta = EntityMetadataHelper.extract(joined_type)
native_query = alias_to_query_map[alias]
local_constrains = {}
if not iteration.parent_alias:
is_root = False
constrains = self.driver.dialect.get_iterating_constrains(query)
result_list = self.driver.query(joined_meta, native_query, local_constrains)
# No result in a sub-query means no result in the main query.
if not result_list:
return False
join_config['result_list'] = result_list
alias_to_query_map.update(self.driver.dialect.get_alias_to_native_query_map(query))
return True
def _compute_iterating_sequence(self, join_map):
iterating_sequence = []
joining_sequence = []
reference_map = {}
# reference_map is used locally for fast reverse lookup
# iterating_seq is a final sequence
# Calculate the iterating sequence
for alias in join_map:
join_config = join_map[alias]
parent_alias = None
property_path = None
if join_config['path']:
parent_alias, property_path = join_config['path'].split('.', 2)
qi = QueryIteration(join_config, alias, parent_alias, property_path)
joining_sequence.append(qi)
reference_map[alias] = qi
# Update the dependency map
for key in reference_map:
reference_a = reference_map[key]
if reference_a.parent_alias not in reference_map:
continue
reference_a.connect(reference_map[reference_a.parent_alias])
iterating_sequence = DependencyManager.get_order(reference_map)
iterating_sequence.reverse()
return iterating_sequence
def _update_join_map(self, origin_metadata, join_map, origin_alias):
link_map = origin_metadata.relational_map
iterating_sequence = []
# Compute the (local) iterating sequence for updating the join map.
# Note: this is not the query iterating sequence.
for alias in join_map:
join_config = join_map[alias]
if join_config['class']:
continue
parent_alias, property_path = join_config['path'].split('.', 2)
join_config['alias'] = alias
join_config['property_path'] = property_path
join_config['parent_alias'] = parent_alias
join_config['result_list'] = []
iterating_sequence.append((join_config, alias, parent_alias, property_path))
# Update the immediate properties.
for join_config, current_alias, parent_alias, property_path in iterating_sequence:
if parent_alias != origin_alias:
continue
if property_path not in link_map:
continue
mapper = link_map[property_path]
join_config['class'] = mapper.target_class
join_config['mapper'] = mapper
# Update the joined properties.
for join_config, current_alias, parent_alias, property_path in iterating_sequence:
if current_alias not in join_map:
continue
if not join_map[current_alias]['class']:
continue
next_origin_class = join_map[current_alias]['class']
next_metadata = EntityMetadataHelper.extract(next_origin_class)
self._update_join_map(next_metadata, join_map, current_alias)
def delete(self, *entities):
""" Delete entities
:param entities: one or more entities
:type entities: type of list of type
"""
for entity in entities:
targeted_entity = self._force_load(entity)
self._uow.register_deleted(targeted_entity)
def refresh(self, *entities):
""" Refresh entities
:param entities: one or more entities
:type entities: type of list of type
"""
for entity in entities:
self.refresh_one(entity)
def refresh_one(self, entity):
self._uow.refresh(self._force_load(entity))
def persist(self, *entities):
""" Persist entities
:param entities: one or more entities
:type entities: type of list of type
"""
for entity in entities:
self.persist_one(entity)
def persist_one(self, entity):
targeted_entity = self._force_load(entity)
registering_action = self._uow.register_new \
if self._uow.is_new(targeted_entity) \
else self._uow.register_dirty
registering_action(targeted_entity)
def recognize(self, entity):
self._uow.register_clean(self._force_load(entity))
def flush(self, *args, **kwargs):
""" Flush all changes of the session.
See the flag from :method:`passerine.db.uow.UnitOfWork.commit`.
"""
self._uow.commit(*args, **kwargs)
def find_record(self, id, cls):
return self._uow.find_recorded_entity(id, cls)
def apply_relational_map(self, entity):
""" Wire connections according to the relational map """
meta = EntityMetadataHelper.extract(entity)
rmap = meta.relational_map
for property_name in rmap:
guide = rmap[property_name]
""" :type: passerine.db.mapper.RelatingGuide """
# In the reverse mapping, the lazy loading is not possible but so
# the proxy object is still used.
if guide.inverted_by:
target_meta = EntityMetadataHelper.extract(guide.target_class)
api = self._driver.collection(target_meta.collection_name)
if guide.association in [AssociationType.ONE_TO_ONE, AssociationType.MANY_TO_ONE]:
# Replace with Criteria
target = api.find_one({guide.inverted_by: entity.id})
entity.__setattr__(property_name, ProxyFactory.make(self, target['_id'], guide))
elif guide.association == AssociationType.ONE_TO_MANY:
# Replace with Criteria
proxy_list = [
ProxyFactory.make(self, target['_id'], guide)
for target in api.find({guide.inverted_by: entity.id})
]
entity.__setattr__(property_name, proxy_list)
elif guide.association == AssociationType.MANY_TO_MANY:
entity.__setattr__(property_name, ProxyCollection(self, entity, guide))
else:
raise IntegrityConstraintError('Unknown type of entity association (reverse mapping)')
return # Done the application
# In the direct mapping, the lazy loading is applied wherever applicable.
if guide.association in [AssociationType.ONE_TO_ONE, AssociationType.MANY_TO_ONE]:
if not entity.__getattribute__(property_name):
continue
entity.__setattr__(
property_name,
ProxyFactory.make(
self,
entity.__getattribute__(property_name),
guide
)
)
elif guide.association == AssociationType.ONE_TO_MANY:
proxy_list = []
for object_id in entity.__getattribute__(property_name):
if not object_id:
continue
proxy_list.append(ProxyFactory.make(self, object_id, guide))
entity.__setattr__(property_name, proxy_list)
elif guide.association == AssociationType.MANY_TO_MANY:
entity.__setattr__(property_name, ProxyCollection(self, entity, guide))
else:
raise IntegrityConstraintError('Unknown type of entity association')
def _force_load(self, entity):
return entity._actual \
if isinstance(entity, ProxyObject) \
else entity
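# --- Editor's illustrative sketch (assumption, not part of the original file) ---
# Rough shape of a Session round-trip using only the methods defined above.
# 'driver' and 'Article' stand in for a configured passerine driver and a mapped
# entity class (the mapping/metadata decorators live elsewhere in passerine).
#
#   session = Session(driver)
#   repo    = session.repository(Article)  # registers the class, caches the repo
#   session.persist(article)               # queued as new or dirty in the unit of work
#   session.flush()                        # commits everything queued so far
#   session.delete(article)
#   session.flush()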
| 34.186893
| 121
| 0.61349
|
import re
from passerine.db.common import ProxyObject, ProxyFactory, ProxyCollection
from passerine.db.repository import Repository
from passerine.db.entity import get_relational_map
from passerine.db.exception import IntegrityConstraintError, UnsupportedRepositoryReferenceError
from passerine.db.mapper import AssociationType
from passerine.db.metadata.entity import EntityMetadata
from passerine.db.metadata.helper import EntityMetadataHelper
from passerine.db.uow import UnitOfWork
from passerine.graph import DependencyNode, DependencyManager
class QueryIteration(DependencyNode):
def __init__(self, join_config, alias, parent_alias, property_path):
super(QueryIteration, self).__init__()
self._join_config = join_config
self._alias = alias
self._parent_alias = parent_alias
self._property_path = property_path
@property
def join_config(self):
return self._join_config
@property
def alias(self):
return self._alias
@property
def parent_alias(self):
return self._parent_alias
@property
def property_path(self):
return self._property_path
def to_dict(self):
return {
'property_path': self.property_path,
'parent_alias': self.parent_alias,
'alias': self.alias,
'join_config': self.join_config,
'adjacent_nodes':self.adjacent_nodes
}
def __repr__(self):
return str('{}({})'.format(self.__class__.__name__, self.to_dict()))
class Session(object):
def __init__(self, driver):
self._driver = driver
self._uow = UnitOfWork(self)
self._repository_map = {}
self._registered_types = {}
        self._re_property_path_delimiter = re.compile(r'\.')
@property
def driver(self):
return self._driver
def collection(self, entity_class):
return self.repository(entity_class)
def repositories(self):
return [self._repository_map[key] for key in self._repository_map]
def repository(self, reference):
key = None
if isinstance(reference, EntityMetadata):
key = reference.collection_name
elif EntityMetadataHelper.hasMetadata(reference):
is_registerable_reference = True
metadata = EntityMetadataHelper.extract(reference)
key = metadata.collection_name
self.register_class(reference)
if not key:
raise UnsupportedRepositoryReferenceError('Either a class with metadata or an entity metadata is supported.')
if key not in self._repository_map:
repository = Repository(
session = self,
representing_class = reference
)
repository.setup_index()
self._repository_map[key] = repository
return self._repository_map[key]
def register_class(self, entity_class):
key = entity_class
if isinstance(entity_class, type):
metadata = EntityMetadataHelper.extract(entity_class)
key = metadata.collection_name
if key not in self._registered_types:
self._registered_types[key] = entity_class
def query(self, query):
metadata = EntityMetadataHelper.extract(query.origin)
if not query.is_new_style:
return self.driver.query(
metadata,
query._condition,
self.driver.dialect.get_iterating_constrains(query)
)
root_class = query.origin
expression_set = query.criteria.get_analyzed_version()
query.join_map[query.alias] = {
'alias': query.alias,
'path': None,
'class': root_class,
'parent_alias': None,
'property_path': None,
'result_list': []
}
self._update_join_map(metadata, query.join_map, query.alias)
iterating_sequence = self._compute_iterating_sequence(query.join_map)
alias_to_query_map = self.driver.dialect.get_alias_to_native_query_map(query)
for iteration in iterating_sequence:
if not self._sub_query(query, alias_to_query_map, iteration):
break
return query.join_map[query.alias]['result_list']
def _sub_query(self, query, alias_to_query_map, iteration):
is_join_query = True
alias = iteration.alias
if alias not in alias_to_query_map:
return False
join_config = query.join_map[alias]
joined_type = join_config['class']
joined_meta = EntityMetadataHelper.extract(joined_type)
native_query = alias_to_query_map[alias]
local_constrains = {}
if not iteration.parent_alias:
is_root = False
constrains = self.driver.dialect.get_iterating_constrains(query)
result_list = self.driver.query(joined_meta, native_query, local_constrains)
if not result_list:
return False
join_config['result_list'] = result_list
alias_to_query_map.update(self.driver.dialect.get_alias_to_native_query_map(query))
return True
def _compute_iterating_sequence(self, join_map):
iterating_sequence = []
joining_sequence = []
reference_map = {}
for alias in join_map:
join_config = join_map[alias]
parent_alias = None
property_path = None
if join_config['path']:
parent_alias, property_path = join_config['path'].split('.', 2)
qi = QueryIteration(join_config, alias, parent_alias, property_path)
joining_sequence.append(qi)
reference_map[alias] = qi
for key in reference_map:
reference_a = reference_map[key]
if reference_a.parent_alias not in reference_map:
continue
reference_a.connect(reference_map[reference_a.parent_alias])
iterating_sequence = DependencyManager.get_order(reference_map)
iterating_sequence.reverse()
return iterating_sequence
def _update_join_map(self, origin_metadata, join_map, origin_alias):
link_map = origin_metadata.relational_map
iterating_sequence = []
for alias in join_map:
join_config = join_map[alias]
if join_config['class']:
continue
parent_alias, property_path = join_config['path'].split('.', 2)
join_config['alias'] = alias
join_config['property_path'] = property_path
join_config['parent_alias'] = parent_alias
join_config['result_list'] = []
iterating_sequence.append((join_config, alias, parent_alias, property_path))
for join_config, current_alias, parent_alias, property_path in iterating_sequence:
if parent_alias != origin_alias:
continue
if property_path not in link_map:
continue
mapper = link_map[property_path]
join_config['class'] = mapper.target_class
join_config['mapper'] = mapper
for join_config, current_alias, parent_alias, property_path in iterating_sequence:
if current_alias not in join_map:
continue
if not join_map[current_alias]['class']:
continue
next_origin_class = join_map[current_alias]['class']
next_metadata = EntityMetadataHelper.extract(next_origin_class)
self._update_join_map(next_metadata, join_map, current_alias)
def delete(self, *entities):
for entity in entities:
targeted_entity = self._force_load(entity)
self._uow.register_deleted(targeted_entity)
def refresh(self, *entities):
for entity in entities:
self.refresh_one(entity)
def refresh_one(self, entity):
self._uow.refresh(self._force_load(entity))
def persist(self, *entities):
for entity in entities:
self.persist_one(entity)
def persist_one(self, entity):
targeted_entity = self._force_load(entity)
registering_action = self._uow.register_new \
if self._uow.is_new(targeted_entity) \
else self._uow.register_dirty
registering_action(targeted_entity)
def recognize(self, entity):
self._uow.register_clean(self._force_load(entity))
def flush(self, *args, **kwargs):
self._uow.commit(*args, **kwargs)
def find_record(self, id, cls):
return self._uow.find_recorded_entity(id, cls)
def apply_relational_map(self, entity):
meta = EntityMetadataHelper.extract(entity)
rmap = meta.relational_map
for property_name in rmap:
guide = rmap[property_name]
if guide.inverted_by:
target_meta = EntityMetadataHelper.extract(guide.target_class)
api = self._driver.collection(target_meta.collection_name)
if guide.association in [AssociationType.ONE_TO_ONE, AssociationType.MANY_TO_ONE]:
target = api.find_one({guide.inverted_by: entity.id})
entity.__setattr__(property_name, ProxyFactory.make(self, target['_id'], guide))
elif guide.association == AssociationType.ONE_TO_MANY:
proxy_list = [
ProxyFactory.make(self, target['_id'], guide)
for target in api.find({guide.inverted_by: entity.id})
]
entity.__setattr__(property_name, proxy_list)
elif guide.association == AssociationType.MANY_TO_MANY:
entity.__setattr__(property_name, ProxyCollection(self, entity, guide))
else:
raise IntegrityConstraintError('Unknown type of entity association (reverse mapping)')
return
if guide.association in [AssociationType.ONE_TO_ONE, AssociationType.MANY_TO_ONE]:
if not entity.__getattribute__(property_name):
continue
entity.__setattr__(
property_name,
ProxyFactory.make(
self,
entity.__getattribute__(property_name),
guide
)
)
elif guide.association == AssociationType.ONE_TO_MANY:
proxy_list = []
for object_id in entity.__getattribute__(property_name):
if not object_id:
continue
proxy_list.append(ProxyFactory.make(self, object_id, guide))
entity.__setattr__(property_name, proxy_list)
elif guide.association == AssociationType.MANY_TO_MANY:
entity.__setattr__(property_name, ProxyCollection(self, entity, guide))
else:
raise IntegrityConstraintError('Unknown type of entity association')
def _force_load(self, entity):
return entity._actual \
if isinstance(entity, ProxyObject) \
else entity
| true
| true
|
f7058c504d2e6bb6e65bff54137c6efbe95c484b
| 13,954
|
py
|
Python
|
parallize.py
|
sksg/parallize
|
58d211fd92a4cac97b1d7795932157b839e42b2b
|
[
"MIT"
] | null | null | null |
parallize.py
|
sksg/parallize
|
58d211fd92a4cac97b1d7795932157b839e42b2b
|
[
"MIT"
] | null | null | null |
parallize.py
|
sksg/parallize
|
58d211fd92a4cac97b1d7795932157b839e42b2b
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy.core.numerictypes import typecodes
import inspect
import functools
import re
import builtins
import os
from concurrent.futures import ThreadPoolExecutor as thread_pool
from concurrent.futures import ProcessPoolExecutor as process_pool
from concurrent.futures import as_completed
def _iterable(y):
try:
iter(y)
except TypeError:
return False
return True
# We use an extended version of:
# http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
_DIMENSION_NAME = r'\w+'
_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)
_VECTOR_ARGUMENT = r'(\({}\))'.format(_CORE_DIMENSION_LIST)
_EXCLUDED_ARGUMENT = r'(_)'
_ARGUMENT = r'(?:{0:}|{1:})'.format(_VECTOR_ARGUMENT, _EXCLUDED_ARGUMENT)
_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)
_OUT_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_VECTOR_ARGUMENT)
_SIGNATURE = '^{0:}->{1:}$'.format(_ARGUMENT_LIST, _OUT_ARGUMENT_LIST)
def _parse_signature(signature):
if not re.match(_SIGNATURE, signature):
raise ValueError(
'not a valid gufunc signature: {}'.format(signature))
inargs, outargs = [], []
_in, _out = signature.split('->')
for arg in re.findall(_ARGUMENT, _in):
if arg[1] == "_":
inargs.append(None)
else:
inarg = []
for match in re.findall(_DIMENSION_NAME, arg[0]):
try:
inarg.append(int(match))
except:
inarg.append(match)
inargs.append(tuple(inarg))
for arg in re.findall(_ARGUMENT, _out):
if arg[1] == "_":
outargs.append(None)
else:
outarg = []
for match in re.findall(_DIMENSION_NAME, arg[0]):
try:
outarg.append(int(match))
except:
outarg.append(match)
outargs.append(tuple(outarg))
return inargs, outargs
def _update_dim_sizes(dim_sizes, arg, core_dims):
if not core_dims:
return
num_core_dims = len(core_dims)
if arg.ndim < num_core_dims:
raise ValueError('%d-dimensional argument does not have enough '
'dimensions for all core dimensions %r'
% (arg.ndim, core_dims))
core_shape = arg.shape[-num_core_dims:]
for dim, size in zip(core_dims, core_shape):
if dim in dim_sizes:
if size != dim_sizes[dim]:
raise ValueError('inconsistent size for core dimension'
' %r: %r vs %r'
% (dim, size, dim_sizes[dim]))
elif isinstance(dim, str):
dim_sizes[dim] = size
elif dim != size:
raise ValueError('inconsistent size for core dimension: %r vs %r'
% (dim, size))
def _parse_input_dimensions(args, arg_dims):
dim_sizes = {}
broadcast_args = []
for a, dims in zip(args, arg_dims):
if dims is None:
broadcast_args.append(None)
continue
_update_dim_sizes(dim_sizes, a, dims)
ndim = a.ndim - len(dims)
dummy_array = np.lib.stride_tricks.as_strided(0, a.shape[:ndim])
broadcast_args.append(dummy_array)
broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args)
return broadcast_shape, dim_sizes
def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):
return [(broadcast_shape + tuple((dim_sizes[dim]
if isinstance(dim, str) else dim)
for dim in core_dims)
if core_dims is not None else None)
for core_dims in list_of_core_dims]
def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)
arrays = tuple(np.empty(shape, dtype=dtype)
for shape, dtype in zip(shapes, dtypes))
return arrays
def parallize(signature, otypes=None, doc=None, default='parallelenv',
evn='MEGA_PARALLIZE', isvec=False, parallel='threads',
sendindex=False):
def wrap_parallized(pyfunc):
return parallized(pyfunc, signature, otypes, doc, default,
evn, isvec, parallel, sendindex)
return wrap_parallized
class parallized(object): # inspired by np.vectorize
def __init__(self, pyfunc, signature, otypes=None, doc=None,
default='parallel', evn='MEGA_PARALLIZE', isvec=False,
parallel_type='threads', sendindex=False):
self.signature = signature
self.default = default
self.evn = evn
self.isvec = isvec
self.parallel_type = parallel_type
self.sendindex = sendindex
self._ufunc = None # Caching to improve default performance
if doc is not None:
self.__doc__ = doc
else:
self.__doc__ = pyfunc.__doc__
if isinstance(otypes, str):
for char in otypes:
if char not in typecodes['All']:
raise ValueError("Invalid otype specified: %s" % (char,))
elif _iterable(otypes):
otypes = ''.join([np.dtype(x).char for x in otypes])
elif otypes is not None:
raise ValueError("Invalid otype specification")
self.otypes = otypes
self._in, self._out = _parse_signature(signature)
self.excluded = [(a is None) for a in self._in]
self.pyfunc = pyfunc
self.__wrapped__ = pyfunc
self.parameters = [k for k in inspect.signature(pyfunc).parameters]
if self.sendindex:
self.parameters = self.parameters[1:]
def _process_args(self, args, kwargs):
givenargs = list(args)
allargs = []
for p in self.parameters:
if p in kwargs:
allargs.append(kwargs.pop(p))
else:
if len(args) == 0:
msg = 'expected {}, got {}'.format(len(self.parameters),
len(givenargs))
raise TypeError("Missing positional arguments: " + msg)
allargs.append(args[0])
args = args[1:]
if len(kwargs) != 0:
raise TypeError("Unknown keyword arguments {}!".format(kwargs))
if len(args) != 0:
msg = 'expected {}, got {}'.format(len(self.parameters),
len(givenargs))
raise TypeError("Too many positional arguments: " + msg)
args = tuple((np.asanyarray(a) if not ex else a)
for a, ex in zip(allargs, self.excluded))
broadcast_shape, dim_sizes = _parse_input_dimensions(args, self._in)
input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, self._in)
args = [(np.broadcast_to(arg, shape, subok=True)
if shape is not None else arg)
for arg, shape in zip(args, input_shapes)]
return broadcast_shape, dim_sizes, args
def __call__(self, *args, **kwargs):
        if self.default == 'parallel':
            return self.parallel(*args, **kwargs)
        if self.default == 'sequential':
            return self.sequential(*args, **kwargs)
        if self.default == 'vectorized':
            return self.vectorized(*args, **kwargs)
        if self.default == 'parallelenv':
if self.evn in os.environ and not os.environ[self.evn]:
return self.vectorized(*args, **kwargs)
else:
return self.parallel(*args, **kwargs)
def vectorized(self, *args, **kwargs):
if self.isvec:
if self.sendindex:
return self.pyfunc(None, *args, **kwargs)
else:
return self.pyfunc(*args, **kwargs)
else:
return self.sequential(*args, **kwargs)
def sequential(self, *args, **kwargs):
broadcast_shape, dim_sizes, args = self._process_args(args, kwargs)
outputs = None
otypes = self.otypes
nout = len(self._out)
for index in np.ndindex(*broadcast_shape):
i_args = ((arg[index] if _in is not None else arg)
for _in, arg in zip(self._in, args))
if self.sendindex:
results = self.pyfunc(index, *i_args)
else:
results = self.pyfunc(*i_args)
n_results = len(results) if isinstance(results, tuple) else 1
if nout != n_results:
raise ValueError(
'wrong number of outputs from pyfunc: expected %r, got %r'
% (nout, n_results))
if nout == 1:
results = (results,)
if outputs is None:
for result, core_dims in zip(results, self._out):
_update_dim_sizes(dim_sizes, result, core_dims)
if otypes is None:
otypes = [np.asarray(result).dtype for result in results]
outputs = _create_arrays(broadcast_shape, dim_sizes,
self._out, otypes)
for output, result in zip(outputs, results):
output[index] = result
if outputs is None:
# did not call the function even once
if otypes is None:
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
if builtins.any(dim not in dim_sizes
for dims in self._out
for dim in dims):
raise ValueError('cannot call `vectorize` with a signature '
'including new output dimensions on size 0 '
'inputs')
outputs = _create_arrays(broadcast_shape, dim_sizes,
self._out, otypes)
return outputs[0] if nout == 1 else outputs
def parallel(self, *args, **kwargs):
broadcast_shape, dim_sizes, args = self._process_args(args, kwargs)
outputs = None
otypes = self.otypes
nout = len(self._out)
if self.parallel_type == 'threads':
pool = thread_pool(os.cpu_count())
elif self.parallel_type == 'processes':
pool = process_pool(os.cpu_count())
futures = {}
for index in np.ndindex(*broadcast_shape):
i_args = ((arg[index] if _in is not None else arg)
for _in, arg in zip(self._in, args))
if self.sendindex:
futures[pool.submit(self.pyfunc, index, *i_args)] = index
else:
futures[pool.submit(self.pyfunc, *i_args)] = index
for f in as_completed(futures):
index = futures[f]
results = f.result()
n_results = len(results) if isinstance(results, tuple) else 1
if nout != n_results:
raise ValueError(
'wrong number of outputs from pyfunc: expected %r, got %r'
% (nout, n_results))
if nout == 1:
results = (results,)
if outputs is None:
for result, core_dims in zip(results, self._out):
_update_dim_sizes(dim_sizes, result, core_dims)
if otypes is None:
otypes = [np.asarray(result).dtype for result in results]
outputs = _create_arrays(broadcast_shape, dim_sizes,
self._out, otypes)
for output, result in zip(outputs, results):
output[index] = result
if outputs is None:
# did not call the function even once
if otypes is None:
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
if builtins.any(dim not in dim_sizes
for dims in self._out
for dim in dims):
raise ValueError('cannot call `vectorize` with a signature '
'including new output dimensions on size 0 '
'inputs')
outputs = _create_arrays(broadcast_shape, dim_sizes,
self._out, otypes)
return outputs[0] if nout == 1 else outputs
class asparallel(object):
def __init__(self, pyfunc, default='parallelenv', evn='MEGA_PARALLIZE'):
self.pyfunc = pyfunc
self.default = default
self.evn = evn
self.__wrapped__ = pyfunc
def __call__(self, *args, **kwargs):
        if self.default == 'parallel':
            return self.parallel(*args, **kwargs)
        if self.default == 'sequential':
            return self.sequential(*args, **kwargs)
        if self.default == 'vectorized':
            return self.vectorized(*args, **kwargs)
        if self.default == 'parallelenv':
if self.evn in os.environ and not os.environ[self.evn]:
return self.vectorized(*args, **kwargs)
else:
return self.parallel(*args, **kwargs)
def parallel(self, *args, **kwargs):
def wrap_parallels(parallelfunc):
return parallelfunc.parallel
return self.pyfunc(wrap_parallels, *args, **kwargs)
def sequential(self, *args, **kwargs):
def wrap_parallels(parallelfunc):
return parallelfunc.sequential
return self.pyfunc(wrap_parallels, *args, **kwargs)
def vectorized(self, *args, **kwargs):
def wrap_parallels(parallelfunc):
return parallelfunc.vectorized
return self.pyfunc(wrap_parallels, *args, **kwargs)
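# --- Editor's illustrative sketch (assumption, not part of the original file) ---
# One way the parallize decorator above might be applied; the signature string
# follows the extended gufunc syntax handled by _parse_signature, and
# default='sequential' keeps the example from spawning a pool.
if __name__ == '__main__':
    @parallize('(n),(n)->()', otypes='d', default='sequential')
    def pairwise_dot(a, b):
        # called once per broadcast index with 1-D slices of length n
        return float(np.dot(a, b))

    # Broadcasts over the leading axis: two (4, 3) inputs give a (4,) result.
    print(pairwise_dot(np.ones((4, 3)), np.ones((4, 3))))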
| 38.021798
| 78
| 0.560484
|
import numpy as np
from numpy.core.numerictypes import typecodes
import inspect
import functools
import re
import builtins
import os
from concurrent.futures import ThreadPoolExecutor as thread_pool
from concurrent.futures import ProcessPoolExecutor as process_pool
from concurrent.futures import as_completed
def _iterable(y):
try:
iter(y)
except TypeError:
return False
return True
_DIMENSION_NAME = r'\w+'
_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)
_VECTOR_ARGUMENT = r'(\({}\))'.format(_CORE_DIMENSION_LIST)
_EXCLUDED_ARGUMENT = r'(_)'
_ARGUMENT = r'(?:{0:}|{1:})'.format(_VECTOR_ARGUMENT, _EXCLUDED_ARGUMENT)
_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)
_OUT_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_VECTOR_ARGUMENT)
_SIGNATURE = '^{0:}->{1:}$'.format(_ARGUMENT_LIST, _OUT_ARGUMENT_LIST)
def _parse_signature(signature):
if not re.match(_SIGNATURE, signature):
raise ValueError(
'not a valid gufunc signature: {}'.format(signature))
inargs, outargs = [], []
_in, _out = signature.split('->')
for arg in re.findall(_ARGUMENT, _in):
if arg[1] == "_":
inargs.append(None)
else:
inarg = []
for match in re.findall(_DIMENSION_NAME, arg[0]):
try:
inarg.append(int(match))
except:
inarg.append(match)
inargs.append(tuple(inarg))
for arg in re.findall(_ARGUMENT, _out):
if arg[1] == "_":
outargs.append(None)
else:
outarg = []
for match in re.findall(_DIMENSION_NAME, arg[0]):
try:
outarg.append(int(match))
except:
outarg.append(match)
outargs.append(tuple(outarg))
return inargs, outargs
def _update_dim_sizes(dim_sizes, arg, core_dims):
if not core_dims:
return
num_core_dims = len(core_dims)
if arg.ndim < num_core_dims:
raise ValueError('%d-dimensional argument does not have enough '
'dimensions for all core dimensions %r'
% (arg.ndim, core_dims))
core_shape = arg.shape[-num_core_dims:]
for dim, size in zip(core_dims, core_shape):
if dim in dim_sizes:
if size != dim_sizes[dim]:
raise ValueError('inconsistent size for core dimension'
' %r: %r vs %r'
% (dim, size, dim_sizes[dim]))
elif isinstance(dim, str):
dim_sizes[dim] = size
elif dim != size:
raise ValueError('inconsistent size for core dimension: %r vs %r'
% (dim, size))
def _parse_input_dimensions(args, arg_dims):
dim_sizes = {}
broadcast_args = []
for a, dims in zip(args, arg_dims):
if dims is None:
broadcast_args.append(None)
continue
_update_dim_sizes(dim_sizes, a, dims)
ndim = a.ndim - len(dims)
dummy_array = np.lib.stride_tricks.as_strided(0, a.shape[:ndim])
broadcast_args.append(dummy_array)
broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args)
return broadcast_shape, dim_sizes
def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):
return [(broadcast_shape + tuple((dim_sizes[dim]
if isinstance(dim, str) else dim)
for dim in core_dims)
if core_dims is not None else None)
for core_dims in list_of_core_dims]
def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)
arrays = tuple(np.empty(shape, dtype=dtype)
for shape, dtype in zip(shapes, dtypes))
return arrays
def parallize(signature, otypes=None, doc=None, default='parallelenv',
evn='MEGA_PARALLIZE', isvec=False, parallel='threads',
sendindex=False):
def wrap_parallized(pyfunc):
return parallized(pyfunc, signature, otypes, doc, default,
evn, isvec, parallel, sendindex)
return wrap_parallized
class parallized(object):
def __init__(self, pyfunc, signature, otypes=None, doc=None,
default='parallel', evn='MEGA_PARALLIZE', isvec=False,
parallel_type='threads', sendindex=False):
self.signature = signature
self.default = default
self.evn = evn
self.isvec = isvec
self.parallel_type = parallel_type
self.sendindex = sendindex
self._ufunc = None
if doc is not None:
self.__doc__ = doc
else:
self.__doc__ = pyfunc.__doc__
if isinstance(otypes, str):
for char in otypes:
if char not in typecodes['All']:
raise ValueError("Invalid otype specified: %s" % (char,))
elif _iterable(otypes):
otypes = ''.join([np.dtype(x).char for x in otypes])
elif otypes is not None:
raise ValueError("Invalid otype specification")
self.otypes = otypes
self._in, self._out = _parse_signature(signature)
self.excluded = [(a is None) for a in self._in]
self.pyfunc = pyfunc
self.__wrapped__ = pyfunc
self.parameters = [k for k in inspect.signature(pyfunc).parameters]
if self.sendindex:
self.parameters = self.parameters[1:]
def _process_args(self, args, kwargs):
givenargs = list(args)
allargs = []
for p in self.parameters:
if p in kwargs:
allargs.append(kwargs.pop(p))
else:
if len(args) == 0:
msg = 'expected {}, got {}'.format(len(self.parameters),
len(givenargs))
raise TypeError("Missing positional arguments: " + msg)
allargs.append(args[0])
args = args[1:]
if len(kwargs) != 0:
raise TypeError("Unknown keyword arguments {}!".format(kwargs))
if len(args) != 0:
msg = 'expected {}, got {}'.format(len(self.parameters),
len(givenargs))
raise TypeError("Too many positional arguments: " + msg)
args = tuple((np.asanyarray(a) if not ex else a)
for a, ex in zip(allargs, self.excluded))
broadcast_shape, dim_sizes = _parse_input_dimensions(args, self._in)
input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, self._in)
args = [(np.broadcast_to(arg, shape, subok=True)
if shape is not None else arg)
for arg, shape in zip(args, input_shapes)]
return broadcast_shape, dim_sizes, args
def __call__(self, *args, **kwargs):
        if self.default == 'parallel':
            return self.parallel(*args, **kwargs)
        if self.default == 'sequential':
            return self.sequential(*args, **kwargs)
        if self.default == 'vectorized':
            return self.vectorized(*args, **kwargs)
        if self.default == 'parallelenv':
if self.evn in os.environ and not os.environ[self.evn]:
return self.vectorized(*args, **kwargs)
else:
return self.parallel(*args, **kwargs)
def vectorized(self, *args, **kwargs):
if self.isvec:
if self.sendindex:
return self.pyfunc(None, *args, **kwargs)
else:
return self.pyfunc(*args, **kwargs)
else:
return self.sequential(*args, **kwargs)
def sequential(self, *args, **kwargs):
broadcast_shape, dim_sizes, args = self._process_args(args, kwargs)
outputs = None
otypes = self.otypes
nout = len(self._out)
for index in np.ndindex(*broadcast_shape):
i_args = ((arg[index] if _in is not None else arg)
for _in, arg in zip(self._in, args))
if self.sendindex:
results = self.pyfunc(index, *i_args)
else:
results = self.pyfunc(*i_args)
n_results = len(results) if isinstance(results, tuple) else 1
if nout != n_results:
raise ValueError(
'wrong number of outputs from pyfunc: expected %r, got %r'
% (nout, n_results))
if nout == 1:
results = (results,)
if outputs is None:
for result, core_dims in zip(results, self._out):
_update_dim_sizes(dim_sizes, result, core_dims)
if otypes is None:
otypes = [np.asarray(result).dtype for result in results]
outputs = _create_arrays(broadcast_shape, dim_sizes,
self._out, otypes)
for output, result in zip(outputs, results):
output[index] = result
if outputs is None:
if otypes is None:
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
if builtins.any(dim not in dim_sizes
for dims in self._out
for dim in dims):
raise ValueError('cannot call `vectorize` with a signature '
'including new output dimensions on size 0 '
'inputs')
outputs = _create_arrays(broadcast_shape, dim_sizes,
self._out, otypes)
return outputs[0] if nout == 1 else outputs
def parallel(self, *args, **kwargs):
broadcast_shape, dim_sizes, args = self._process_args(args, kwargs)
outputs = None
otypes = self.otypes
nout = len(self._out)
if self.parallel_type == 'threads':
pool = thread_pool(os.cpu_count())
elif self.parallel_type == 'processes':
pool = process_pool(os.cpu_count())
futures = {}
for index in np.ndindex(*broadcast_shape):
i_args = ((arg[index] if _in is not None else arg)
for _in, arg in zip(self._in, args))
if self.sendindex:
futures[pool.submit(self.pyfunc, index, *i_args)] = index
else:
futures[pool.submit(self.pyfunc, *i_args)] = index
for f in as_completed(futures):
index = futures[f]
results = f.result()
n_results = len(results) if isinstance(results, tuple) else 1
if nout != n_results:
raise ValueError(
'wrong number of outputs from pyfunc: expected %r, got %r'
% (nout, n_results))
if nout == 1:
results = (results,)
if outputs is None:
for result, core_dims in zip(results, self._out):
_update_dim_sizes(dim_sizes, result, core_dims)
if otypes is None:
otypes = [np.asarray(result).dtype for result in results]
outputs = _create_arrays(broadcast_shape, dim_sizes,
self._out, otypes)
for output, result in zip(outputs, results):
output[index] = result
if outputs is None:
if otypes is None:
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
if builtins.any(dim not in dim_sizes
for dims in self._out
for dim in dims):
raise ValueError('cannot call `vectorize` with a signature '
'including new output dimensions on size 0 '
'inputs')
outputs = _create_arrays(broadcast_shape, dim_sizes,
self._out, otypes)
return outputs[0] if nout == 1 else outputs
class asparallel(object):
def __init__(self, pyfunc, default='parallelenv', evn='MEGA_PARALLIZE'):
self.pyfunc = pyfunc
self.default = default
self.evn = evn
self.__wrapped__ = pyfunc
def __call__(self, *args, **kwargs):
        if self.default == 'parallel':
            return self.parallel(*args, **kwargs)
        if self.default == 'sequential':
            return self.sequential(*args, **kwargs)
        if self.default == 'vectorized':
            return self.vectorized(*args, **kwargs)
        if self.default == 'parallelenv':
if self.evn in os.environ and not os.environ[self.evn]:
return self.vectorized(*args, **kwargs)
else:
return self.parallel(*args, **kwargs)
def parallel(self, *args, **kwargs):
def wrap_parallels(parallelfunc):
return parallelfunc.parallel
return self.pyfunc(wrap_parallels, *args, **kwargs)
def sequential(self, *args, **kwargs):
def wrap_parallels(parallelfunc):
return parallelfunc.sequential
return self.pyfunc(wrap_parallels, *args, **kwargs)
def vectorized(self, *args, **kwargs):
def wrap_parallels(parallelfunc):
return parallelfunc.vectorized
return self.pyfunc(wrap_parallels, *args, **kwargs)
| true
| true
|
f7058c86361e6fb62602fa1810c1f92feb394991
| 21,195
|
py
|
Python
|
tests/reflection.py
|
onyb/peewee
|
323983c2ecf2ec70a14ed78ddd00cf5cd17d56e2
|
[
"MIT"
] | 1
|
2019-11-17T04:55:26.000Z
|
2019-11-17T04:55:26.000Z
|
tests/reflection.py
|
onyb/peewee
|
323983c2ecf2ec70a14ed78ddd00cf5cd17d56e2
|
[
"MIT"
] | null | null | null |
tests/reflection.py
|
onyb/peewee
|
323983c2ecf2ec70a14ed78ddd00cf5cd17d56e2
|
[
"MIT"
] | 1
|
2019-07-07T20:57:22.000Z
|
2019-07-07T20:57:22.000Z
|
import datetime
import os
import re
from peewee import *
from playhouse.reflection import *
from .base import IS_SQLITE_OLD
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import requires_models
from .base import requires_sqlite
from .base import skip_if
from .base_models import Tweet
from .base_models import User
class ColTypes(TestModel):
f1 = BigIntegerField(index=True)
f2 = BlobField()
f3 = BooleanField()
f4 = CharField(max_length=50)
f5 = DateField()
f6 = DateTimeField()
f7 = DecimalField()
f8 = DoubleField()
f9 = FloatField()
f10 = IntegerField(unique=True)
f11 = AutoField()
f12 = TextField()
f13 = TimeField()
class Meta:
indexes = (
(('f10', 'f11'), True),
(('f11', 'f8', 'f13'), False),
)
class Nullable(TestModel):
nullable_cf = CharField(null=True)
nullable_if = IntegerField(null=True)
class RelModel(TestModel):
col_types = ForeignKeyField(ColTypes, backref='foo')
col_types_nullable = ForeignKeyField(ColTypes, null=True)
class FKPK(TestModel):
col_types = ForeignKeyField(ColTypes, primary_key=True)
class Underscores(TestModel):
_id = AutoField()
_name = CharField()
class Category(TestModel):
name = CharField(max_length=10)
parent = ForeignKeyField('self', null=True)
class Nugget(TestModel):
category_id = ForeignKeyField(Category, column_name='category_id')
category = CharField()
class BaseReflectionTestCase(ModelTestCase):
def setUp(self):
super(BaseReflectionTestCase, self).setUp()
self.introspector = Introspector.from_database(self.database)
class TestReflection(BaseReflectionTestCase):
requires = [ColTypes, Nullable, RelModel, FKPK, Underscores, Category,
Nugget]
def test_generate_models(self):
models = self.introspector.generate_models()
self.assertTrue(set((
'category',
'col_types',
'fkpk',
'nugget',
'nullable',
'rel_model',
'underscores')).issubset(set(models)))
def assertIsInstance(obj, klass):
self.assertTrue(isinstance(obj, klass))
category = models['category']
self.assertEqual(
sorted(category._meta.fields),
['id', 'name', 'parent'])
assertIsInstance(category.id, AutoField)
assertIsInstance(category.name, CharField)
assertIsInstance(category.parent, ForeignKeyField)
self.assertEqual(category.parent.rel_model, category)
fkpk = models['fkpk']
self.assertEqual(sorted(fkpk._meta.fields), ['col_types'])
assertIsInstance(fkpk.col_types, ForeignKeyField)
self.assertEqual(fkpk.col_types.rel_model, models['col_types'])
self.assertTrue(fkpk.col_types.primary_key)
relmodel = models['rel_model']
self.assertEqual(
sorted(relmodel._meta.fields),
['col_types', 'col_types_nullable', 'id'])
assertIsInstance(relmodel.col_types, ForeignKeyField)
assertIsInstance(relmodel.col_types_nullable, ForeignKeyField)
self.assertFalse(relmodel.col_types.null)
self.assertTrue(relmodel.col_types_nullable.null)
self.assertEqual(relmodel.col_types.rel_model,
models['col_types'])
self.assertEqual(relmodel.col_types_nullable.rel_model,
models['col_types'])
@requires_sqlite
def test_generate_models_indexes(self):
models = self.introspector.generate_models()
self.assertEqual(models['fkpk']._meta.indexes, [])
self.assertEqual(models['rel_model']._meta.indexes, [])
self.assertEqual(models['category']._meta.indexes, [])
col_types = models['col_types']
indexed = set(['f1'])
unique = set(['f10'])
for field in col_types._meta.sorted_fields:
self.assertEqual(field.index, field.name in indexed)
self.assertEqual(field.unique, field.name in unique)
indexes = col_types._meta.indexes
self.assertEqual(sorted(indexes), [
(['f10', 'f11'], True),
(['f11', 'f8', 'f13'], False),
])
def test_table_subset(self):
models = self.introspector.generate_models(table_names=[
'category',
'col_types',
'foobarbaz'])
self.assertEqual(sorted(models.keys()), ['category', 'col_types'])
@requires_sqlite
def test_sqlite_fk_re(self):
user_id_tests = [
'FOREIGN KEY("user_id") REFERENCES "users"("id")',
'FOREIGN KEY(user_id) REFERENCES users(id)',
'FOREIGN KEY ([user_id]) REFERENCES [users] ([id])',
'"user_id" NOT NULL REFERENCES "users" ("id")',
'user_id not null references users (id)',
]
fk_pk_tests = [
('"col_types_id" INTEGER NOT NULL PRIMARY KEY REFERENCES '
'"coltypes" ("f11")'),
'FOREIGN KEY ("col_types_id") REFERENCES "coltypes" ("f11")',
]
regex = SqliteMetadata.re_foreign_key
for test in user_id_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'user_id', 'users', 'id',
))
for test in fk_pk_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'col_types_id', 'coltypes', 'f11',
))
def test_make_column_name(self):
# Tests for is_foreign_key=False.
tests = (
('Column', 'column'),
('Foo_id', 'foo_id'),
('foo_id', 'foo_id'),
('foo_id_id', 'foo_id_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_id'),
('camelCase', 'camel_case'),
('ABCdefGhi', 'ab_cdef_ghi'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name), expected)
# Tests for is_foreign_key=True.
tests = (
('Foo_id', 'foo'),
('foo_id', 'foo'),
('foo_id_id', 'foo_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_'),
('camelCase', 'camel_case'),
('ABCdefGhi', 'ab_cdef_ghi'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name, True), expected)
def test_make_model_name(self):
tests = (
('Table', 'Table'),
('table', 'Table'),
('table_baz', 'TableBaz'),
('foo__bar__baz2', 'FooBarBaz2'),
('foo12_3', 'Foo123'),
)
for table_name, expected in tests:
self.assertEqual(
self.introspector.make_model_name(table_name), expected)
def test_col_types(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', (BigIntegerField, IntegerField), False),
# There do not appear to be separate constants for the blob and
# text field types in MySQL's drivers. See GH#1034.
('f2', (BlobField, TextField), False),
('f3', (BooleanField, IntegerField), False),
('f4', CharField, False),
('f5', DateField, False),
('f6', DateTimeField, False),
('f7', DecimalField, False),
('f8', (DoubleField, FloatField), False),
('f9', FloatField, False),
('f10', IntegerField, False),
('f11', AutoField, False),
('f12', TextField, False),
('f13', TimeField, False))),
('rel_model', (
('col_types_id', ForeignKeyField, False),
('col_types_nullable_id', ForeignKeyField, True))),
('nugget', (
('category_id', ForeignKeyField, False),
('category', CharField, False))),
('nullable', (
('nullable_cf', CharField, True),
('nullable_if', IntegerField, True))),
('fkpk', (
('col_types_id', ForeignKeyField, False),)),
('underscores', (
('_id', AutoField, False),
('_name', CharField, False))),
('category', (
('name', CharField, False),
('parent_id', ForeignKeyField, True))),
)
for table_name, expected_columns in expected:
introspected_columns = columns[table_name]
for field_name, field_class, is_null in expected_columns:
if not isinstance(field_class, (list, tuple)):
field_class = (field_class,)
column = introspected_columns[field_name]
self.assertTrue(column.field_class in field_class,
"%s in %s" % (column.field_class, field_class))
self.assertEqual(column.nullable, is_null)
def test_foreign_keys(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
self.assertEqual(foreign_keys['col_types'], [])
rel_model = foreign_keys['rel_model']
self.assertEqual(len(rel_model), 2)
fkpk = foreign_keys['fkpk']
self.assertEqual(len(fkpk), 1)
fkpk_fk = fkpk[0]
self.assertEqual(fkpk_fk.table, 'fkpk')
self.assertEqual(fkpk_fk.column, 'col_types_id')
self.assertEqual(fkpk_fk.dest_table, 'col_types')
self.assertEqual(fkpk_fk.dest_column, 'f11')
category = foreign_keys['category']
self.assertEqual(len(category), 1)
category_fk = category[0]
self.assertEqual(category_fk.table, 'category')
self.assertEqual(category_fk.column, 'parent_id')
self.assertEqual(category_fk.dest_table, 'category')
self.assertEqual(category_fk.dest_column, 'id')
def test_table_names(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
names = (
('col_types', 'ColTypes'),
('nullable', 'Nullable'),
('rel_model', 'RelModel'),
('fkpk', 'Fkpk'))
for k, v in names:
self.assertEqual(model_names[k], v)
def test_column_meta(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
rel_model = columns['rel_model']
col_types_id = rel_model['col_types_id']
self.assertEqual(col_types_id.get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'field': "'f11'",
})
col_types_nullable_id = rel_model['col_types_nullable_id']
self.assertEqual(col_types_nullable_id.get_field_parameters(), {
'column_name': "'col_types_nullable_id'",
'null': True,
'backref': "'col_types_col_types_nullable_set'",
'model': 'ColTypes',
'field': "'f11'",
})
fkpk = columns['fkpk']
self.assertEqual(fkpk['col_types_id'].get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'primary_key': True,
'field': "'f11'"})
category = columns['category']
parent_id = category['parent_id']
self.assertEqual(parent_id.get_field_parameters(), {
'column_name': "'parent_id'",
'null': True,
'model': "'self'",
'field': "'id'",
})
nugget = columns['nugget']
category_fk = nugget['category_id']
self.assertEqual(category_fk.name, 'category_id')
self.assertEqual(category_fk.get_field_parameters(), {
'field': "'id'",
'model': 'Category',
'column_name': "'category_id'",
})
category = nugget['category']
self.assertEqual(category.name, 'category')
def test_get_field(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', ('f1 = BigIntegerField(index=True)',
'f1 = IntegerField(index=True)')),
('f2', ('f2 = BlobField()', 'f2 = TextField()')),
('f4', 'f4 = CharField()'),
('f5', 'f5 = DateField()'),
('f6', 'f6 = DateTimeField()'),
('f7', 'f7 = DecimalField()'),
('f10', 'f10 = IntegerField(unique=True)'),
('f11', 'f11 = AutoField()'),
('f12', ('f12 = TextField()', 'f12 = BlobField()')),
('f13', 'f13 = TimeField()'),
)),
('nullable', (
('nullable_cf', 'nullable_cf = '
'CharField(null=True)'),
('nullable_if', 'nullable_if = IntegerField(null=True)'),
)),
('fkpk', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes, "
'primary_key=True)'),
)),
('nugget', (
('category_id', 'category_id = ForeignKeyField('
"column_name='category_id', field='id', model=Category)"),
('category', 'category = CharField()'),
)),
('rel_model', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes)"),
('col_types_nullable_id', 'col_types_nullable = '
"ForeignKeyField(backref='col_types_col_types_nullable_set', "
"column_name='col_types_nullable_id', field='f11', "
'model=ColTypes, null=True)'),
)),
('underscores', (
('_id', '_id = AutoField()'),
('_name', '_name = CharField()'),
)),
('category', (
('name', 'name = CharField()'),
('parent_id', 'parent = ForeignKeyField('
"column_name='parent_id', field='id', model='self', "
'null=True)'),
)),
)
for table, field_data in expected:
for field_name, fields in field_data:
if not isinstance(fields, tuple):
fields = (fields,)
actual = columns[table][field_name].get_field()
self.assertTrue(actual in fields,
'%s not in %s' % (actual, fields))
class EventLog(TestModel):
data = CharField(constraints=[SQL('DEFAULT \'\'')])
timestamp = DateTimeField(constraints=[SQL('DEFAULT current_timestamp')])
flags = IntegerField(constraints=[SQL('DEFAULT 0')])
misc = TextField(constraints=[SQL('DEFAULT \'foo\'')])
class DefaultVals(TestModel):
key = CharField(constraints=[SQL('DEFAULT \'foo\'')])
value = IntegerField(constraints=[SQL('DEFAULT 0')])
class Meta:
primary_key = CompositeKey('key', 'value')
class TestReflectDefaultValues(BaseReflectionTestCase):
requires = [DefaultVals, EventLog]
@requires_sqlite
def test_default_values(self):
models = self.introspector.generate_models()
default_vals = models['default_vals']
create_table = (
'CREATE TABLE IF NOT EXISTS "default_vals" ('
'"key" VARCHAR(255) NOT NULL DEFAULT \'foo\', '
'"value" INTEGER NOT NULL DEFAULT 0, '
'PRIMARY KEY ("key", "value"))')
# Re-create table using the introspected schema.
self.assertSQL(default_vals._schema._create_table(), create_table, [])
default_vals.drop_table()
default_vals.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
default_vals = models['default_vals']
self.assertSQL(default_vals._schema._create_table(), create_table, [])
@requires_sqlite
def test_default_values_extended(self):
models = self.introspector.generate_models()
eventlog = models['event_log']
create_table = (
'CREATE TABLE IF NOT EXISTS "event_log" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"data" VARCHAR(255) NOT NULL DEFAULT \'\', '
'"timestamp" DATETIME NOT NULL DEFAULT current_timestamp, '
'"flags" INTEGER NOT NULL DEFAULT 0, '
'"misc" TEXT NOT NULL DEFAULT \'foo\')')
# Re-create table using the introspected schema.
self.assertSQL(eventlog._schema._create_table(), create_table, [])
eventlog.drop_table()
eventlog.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
eventlog = models['event_log']
self.assertSQL(eventlog._schema._create_table(), create_table, [])
class TestReflectionDependencies(BaseReflectionTestCase):
requires = [User, Tweet]
def test_generate_dependencies(self):
models = self.introspector.generate_models(table_names=['tweet'])
self.assertEqual(set(models), set(('users', 'tweet')))
IUser = models['users']
ITweet = models['tweet']
self.assertEqual(set(ITweet._meta.fields), set((
'id', 'user', 'content', 'timestamp')))
self.assertEqual(set(IUser._meta.fields), set(('id', 'username')))
self.assertTrue(ITweet.user.rel_model is IUser)
self.assertTrue(ITweet.user.rel_field is IUser.id)
def test_ignore_backrefs(self):
models = self.introspector.generate_models(table_names=['users'])
self.assertEqual(set(models), set(('users',)))
class Note(TestModel):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
status = IntegerField()
class TestReflectViews(BaseReflectionTestCase):
requires = [Note]
def setUp(self):
super(TestReflectViews, self).setUp()
self.database.execute_sql('CREATE VIEW notes_public AS '
'SELECT content, timestamp FROM note '
'WHERE status = 1 ORDER BY timestamp DESC')
def tearDown(self):
self.database.execute_sql('DROP VIEW notes_public')
super(TestReflectViews, self).tearDown()
def test_views_ignored_default(self):
models = self.introspector.generate_models()
self.assertFalse('notes_public' in models)
def test_introspect_view(self):
models = self.introspector.generate_models(include_views=True)
self.assertTrue('notes_public' in models)
NotesPublic = models['notes_public']
self.assertEqual(sorted(NotesPublic._meta.fields),
['content', 'timestamp'])
self.assertTrue(isinstance(NotesPublic.content, TextField))
self.assertTrue(isinstance(NotesPublic.timestamp, DateTimeField))
@skip_if(IS_SQLITE_OLD)
def test_introspect_view_integration(self):
for i, (ct, st) in enumerate([('n1', 1), ('n2', 2), ('n3', 1)]):
Note.create(content=ct, status=st,
timestamp=datetime.datetime(2018, 1, 1 + i))
NP = self.introspector.generate_models(
table_names=['notes_public'], include_views=True)['notes_public']
self.assertEqual([(np.content, np.timestamp) for np in NP.select()], [
('n3', datetime.datetime(2018, 1, 3)),
('n1', datetime.datetime(2018, 1, 1))])
class Event(TestModel):
key = TextField()
timestamp = DateTimeField(index=True)
metadata = TextField(default='')
class TestInteractiveHelpers(ModelTestCase):
requires = [Category, Event]
def test_generate_models(self):
M = generate_models(self.database)
self.assertTrue('category' in M)
self.assertTrue('event' in M)
def assertFields(m, expected):
actual = [(f.name, f.field_type) for f in m._meta.sorted_fields]
self.assertEqual(actual, expected)
assertFields(M['category'], [('id', 'AUTO'), ('name', 'VARCHAR'),
('parent', 'INT')])
assertFields(M['event'], [
('id', 'AUTO'),
('key', 'TEXT'),
('timestamp', 'DATETIME'),
('metadata', 'TEXT')])
| 35.325
| 79
| 0.566407
|
import datetime
import os
import re
from peewee import *
from playhouse.reflection import *
from .base import IS_SQLITE_OLD
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import requires_models
from .base import requires_sqlite
from .base import skip_if
from .base_models import Tweet
from .base_models import User
class ColTypes(TestModel):
f1 = BigIntegerField(index=True)
f2 = BlobField()
f3 = BooleanField()
f4 = CharField(max_length=50)
f5 = DateField()
f6 = DateTimeField()
f7 = DecimalField()
f8 = DoubleField()
f9 = FloatField()
f10 = IntegerField(unique=True)
f11 = AutoField()
f12 = TextField()
f13 = TimeField()
class Meta:
indexes = (
(('f10', 'f11'), True),
(('f11', 'f8', 'f13'), False),
)
class Nullable(TestModel):
nullable_cf = CharField(null=True)
nullable_if = IntegerField(null=True)
class RelModel(TestModel):
col_types = ForeignKeyField(ColTypes, backref='foo')
col_types_nullable = ForeignKeyField(ColTypes, null=True)
class FKPK(TestModel):
col_types = ForeignKeyField(ColTypes, primary_key=True)
class Underscores(TestModel):
_id = AutoField()
_name = CharField()
class Category(TestModel):
name = CharField(max_length=10)
parent = ForeignKeyField('self', null=True)
class Nugget(TestModel):
category_id = ForeignKeyField(Category, column_name='category_id')
category = CharField()
class BaseReflectionTestCase(ModelTestCase):
def setUp(self):
super(BaseReflectionTestCase, self).setUp()
self.introspector = Introspector.from_database(self.database)
class TestReflection(BaseReflectionTestCase):
requires = [ColTypes, Nullable, RelModel, FKPK, Underscores, Category,
Nugget]
def test_generate_models(self):
models = self.introspector.generate_models()
self.assertTrue(set((
'category',
'col_types',
'fkpk',
'nugget',
'nullable',
'rel_model',
'underscores')).issubset(set(models)))
def assertIsInstance(obj, klass):
self.assertTrue(isinstance(obj, klass))
category = models['category']
self.assertEqual(
sorted(category._meta.fields),
['id', 'name', 'parent'])
assertIsInstance(category.id, AutoField)
assertIsInstance(category.name, CharField)
assertIsInstance(category.parent, ForeignKeyField)
self.assertEqual(category.parent.rel_model, category)
fkpk = models['fkpk']
self.assertEqual(sorted(fkpk._meta.fields), ['col_types'])
assertIsInstance(fkpk.col_types, ForeignKeyField)
self.assertEqual(fkpk.col_types.rel_model, models['col_types'])
self.assertTrue(fkpk.col_types.primary_key)
relmodel = models['rel_model']
self.assertEqual(
sorted(relmodel._meta.fields),
['col_types', 'col_types_nullable', 'id'])
assertIsInstance(relmodel.col_types, ForeignKeyField)
assertIsInstance(relmodel.col_types_nullable, ForeignKeyField)
self.assertFalse(relmodel.col_types.null)
self.assertTrue(relmodel.col_types_nullable.null)
self.assertEqual(relmodel.col_types.rel_model,
models['col_types'])
self.assertEqual(relmodel.col_types_nullable.rel_model,
models['col_types'])
@requires_sqlite
def test_generate_models_indexes(self):
models = self.introspector.generate_models()
self.assertEqual(models['fkpk']._meta.indexes, [])
self.assertEqual(models['rel_model']._meta.indexes, [])
self.assertEqual(models['category']._meta.indexes, [])
col_types = models['col_types']
indexed = set(['f1'])
unique = set(['f10'])
for field in col_types._meta.sorted_fields:
self.assertEqual(field.index, field.name in indexed)
self.assertEqual(field.unique, field.name in unique)
indexes = col_types._meta.indexes
self.assertEqual(sorted(indexes), [
(['f10', 'f11'], True),
(['f11', 'f8', 'f13'], False),
])
def test_table_subset(self):
models = self.introspector.generate_models(table_names=[
'category',
'col_types',
'foobarbaz'])
self.assertEqual(sorted(models.keys()), ['category', 'col_types'])
@requires_sqlite
def test_sqlite_fk_re(self):
user_id_tests = [
'FOREIGN KEY("user_id") REFERENCES "users"("id")',
'FOREIGN KEY(user_id) REFERENCES users(id)',
'FOREIGN KEY ([user_id]) REFERENCES [users] ([id])',
'"user_id" NOT NULL REFERENCES "users" ("id")',
'user_id not null references users (id)',
]
fk_pk_tests = [
('"col_types_id" INTEGER NOT NULL PRIMARY KEY REFERENCES '
'"coltypes" ("f11")'),
'FOREIGN KEY ("col_types_id") REFERENCES "coltypes" ("f11")',
]
regex = SqliteMetadata.re_foreign_key
for test in user_id_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'user_id', 'users', 'id',
))
for test in fk_pk_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'col_types_id', 'coltypes', 'f11',
))
def test_make_column_name(self):
tests = (
('Column', 'column'),
('Foo_id', 'foo_id'),
('foo_id', 'foo_id'),
('foo_id_id', 'foo_id_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_id'),
('camelCase', 'camel_case'),
('ABCdefGhi', 'ab_cdef_ghi'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name), expected)
tests = (
('Foo_id', 'foo'),
('foo_id', 'foo'),
('foo_id_id', 'foo_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_'),
('camelCase', 'camel_case'),
('ABCdefGhi', 'ab_cdef_ghi'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name, True), expected)
def test_make_model_name(self):
tests = (
('Table', 'Table'),
('table', 'Table'),
('table_baz', 'TableBaz'),
('foo__bar__baz2', 'FooBarBaz2'),
('foo12_3', 'Foo123'),
)
for table_name, expected in tests:
self.assertEqual(
self.introspector.make_model_name(table_name), expected)
def test_col_types(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', (BigIntegerField, IntegerField), False),
('f2', (BlobField, TextField), False),
('f3', (BooleanField, IntegerField), False),
('f4', CharField, False),
('f5', DateField, False),
('f6', DateTimeField, False),
('f7', DecimalField, False),
('f8', (DoubleField, FloatField), False),
('f9', FloatField, False),
('f10', IntegerField, False),
('f11', AutoField, False),
('f12', TextField, False),
('f13', TimeField, False))),
('rel_model', (
('col_types_id', ForeignKeyField, False),
('col_types_nullable_id', ForeignKeyField, True))),
('nugget', (
('category_id', ForeignKeyField, False),
('category', CharField, False))),
('nullable', (
('nullable_cf', CharField, True),
('nullable_if', IntegerField, True))),
('fkpk', (
('col_types_id', ForeignKeyField, False),)),
('underscores', (
('_id', AutoField, False),
('_name', CharField, False))),
('category', (
('name', CharField, False),
('parent_id', ForeignKeyField, True))),
)
for table_name, expected_columns in expected:
introspected_columns = columns[table_name]
for field_name, field_class, is_null in expected_columns:
if not isinstance(field_class, (list, tuple)):
field_class = (field_class,)
column = introspected_columns[field_name]
self.assertTrue(column.field_class in field_class,
"%s in %s" % (column.field_class, field_class))
self.assertEqual(column.nullable, is_null)
def test_foreign_keys(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
self.assertEqual(foreign_keys['col_types'], [])
rel_model = foreign_keys['rel_model']
self.assertEqual(len(rel_model), 2)
fkpk = foreign_keys['fkpk']
self.assertEqual(len(fkpk), 1)
fkpk_fk = fkpk[0]
self.assertEqual(fkpk_fk.table, 'fkpk')
self.assertEqual(fkpk_fk.column, 'col_types_id')
self.assertEqual(fkpk_fk.dest_table, 'col_types')
self.assertEqual(fkpk_fk.dest_column, 'f11')
category = foreign_keys['category']
self.assertEqual(len(category), 1)
category_fk = category[0]
self.assertEqual(category_fk.table, 'category')
self.assertEqual(category_fk.column, 'parent_id')
self.assertEqual(category_fk.dest_table, 'category')
self.assertEqual(category_fk.dest_column, 'id')
def test_table_names(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
names = (
('col_types', 'ColTypes'),
('nullable', 'Nullable'),
('rel_model', 'RelModel'),
('fkpk', 'Fkpk'))
for k, v in names:
self.assertEqual(model_names[k], v)
def test_column_meta(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
rel_model = columns['rel_model']
col_types_id = rel_model['col_types_id']
self.assertEqual(col_types_id.get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'field': "'f11'",
})
col_types_nullable_id = rel_model['col_types_nullable_id']
self.assertEqual(col_types_nullable_id.get_field_parameters(), {
'column_name': "'col_types_nullable_id'",
'null': True,
'backref': "'col_types_col_types_nullable_set'",
'model': 'ColTypes',
'field': "'f11'",
})
fkpk = columns['fkpk']
self.assertEqual(fkpk['col_types_id'].get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'primary_key': True,
'field': "'f11'"})
category = columns['category']
parent_id = category['parent_id']
self.assertEqual(parent_id.get_field_parameters(), {
'column_name': "'parent_id'",
'null': True,
'model': "'self'",
'field': "'id'",
})
nugget = columns['nugget']
category_fk = nugget['category_id']
self.assertEqual(category_fk.name, 'category_id')
self.assertEqual(category_fk.get_field_parameters(), {
'field': "'id'",
'model': 'Category',
'column_name': "'category_id'",
})
category = nugget['category']
self.assertEqual(category.name, 'category')
def test_get_field(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', ('f1 = BigIntegerField(index=True)',
'f1 = IntegerField(index=True)')),
('f2', ('f2 = BlobField()', 'f2 = TextField()')),
('f4', 'f4 = CharField()'),
('f5', 'f5 = DateField()'),
('f6', 'f6 = DateTimeField()'),
('f7', 'f7 = DecimalField()'),
('f10', 'f10 = IntegerField(unique=True)'),
('f11', 'f11 = AutoField()'),
('f12', ('f12 = TextField()', 'f12 = BlobField()')),
('f13', 'f13 = TimeField()'),
)),
('nullable', (
('nullable_cf', 'nullable_cf = '
'CharField(null=True)'),
('nullable_if', 'nullable_if = IntegerField(null=True)'),
)),
('fkpk', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes, "
'primary_key=True)'),
)),
('nugget', (
('category_id', 'category_id = ForeignKeyField('
"column_name='category_id', field='id', model=Category)"),
('category', 'category = CharField()'),
)),
('rel_model', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes)"),
('col_types_nullable_id', 'col_types_nullable = '
"ForeignKeyField(backref='col_types_col_types_nullable_set', "
"column_name='col_types_nullable_id', field='f11', "
'model=ColTypes, null=True)'),
)),
('underscores', (
('_id', '_id = AutoField()'),
('_name', '_name = CharField()'),
)),
('category', (
('name', 'name = CharField()'),
('parent_id', 'parent = ForeignKeyField('
"column_name='parent_id', field='id', model='self', "
'null=True)'),
)),
)
for table, field_data in expected:
for field_name, fields in field_data:
if not isinstance(fields, tuple):
fields = (fields,)
actual = columns[table][field_name].get_field()
self.assertTrue(actual in fields,
'%s not in %s' % (actual, fields))
class EventLog(TestModel):
data = CharField(constraints=[SQL('DEFAULT \'\'')])
timestamp = DateTimeField(constraints=[SQL('DEFAULT current_timestamp')])
flags = IntegerField(constraints=[SQL('DEFAULT 0')])
misc = TextField(constraints=[SQL('DEFAULT \'foo\'')])
class DefaultVals(TestModel):
key = CharField(constraints=[SQL('DEFAULT \'foo\'')])
value = IntegerField(constraints=[SQL('DEFAULT 0')])
class Meta:
primary_key = CompositeKey('key', 'value')
class TestReflectDefaultValues(BaseReflectionTestCase):
requires = [DefaultVals, EventLog]
@requires_sqlite
def test_default_values(self):
models = self.introspector.generate_models()
default_vals = models['default_vals']
create_table = (
'CREATE TABLE IF NOT EXISTS "default_vals" ('
'"key" VARCHAR(255) NOT NULL DEFAULT \'foo\', '
'"value" INTEGER NOT NULL DEFAULT 0, '
'PRIMARY KEY ("key", "value"))')
# Re-create table using the introspected schema.
self.assertSQL(default_vals._schema._create_table(), create_table, [])
default_vals.drop_table()
default_vals.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
default_vals = models['default_vals']
self.assertSQL(default_vals._schema._create_table(), create_table, [])
@requires_sqlite
def test_default_values_extended(self):
models = self.introspector.generate_models()
eventlog = models['event_log']
create_table = (
'CREATE TABLE IF NOT EXISTS "event_log" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"data" VARCHAR(255) NOT NULL DEFAULT \'\', '
'"timestamp" DATETIME NOT NULL DEFAULT current_timestamp, '
'"flags" INTEGER NOT NULL DEFAULT 0, '
'"misc" TEXT NOT NULL DEFAULT \'foo\')')
# Re-create table using the introspected schema.
self.assertSQL(eventlog._schema._create_table(), create_table, [])
eventlog.drop_table()
eventlog.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
eventlog = models['event_log']
self.assertSQL(eventlog._schema._create_table(), create_table, [])
class TestReflectionDependencies(BaseReflectionTestCase):
requires = [User, Tweet]
def test_generate_dependencies(self):
models = self.introspector.generate_models(table_names=['tweet'])
self.assertEqual(set(models), set(('users', 'tweet')))
IUser = models['users']
ITweet = models['tweet']
self.assertEqual(set(ITweet._meta.fields), set((
'id', 'user', 'content', 'timestamp')))
self.assertEqual(set(IUser._meta.fields), set(('id', 'username')))
self.assertTrue(ITweet.user.rel_model is IUser)
self.assertTrue(ITweet.user.rel_field is IUser.id)
def test_ignore_backrefs(self):
models = self.introspector.generate_models(table_names=['users'])
self.assertEqual(set(models), set(('users',)))
class Note(TestModel):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
status = IntegerField()
class TestReflectViews(BaseReflectionTestCase):
requires = [Note]
def setUp(self):
super(TestReflectViews, self).setUp()
self.database.execute_sql('CREATE VIEW notes_public AS '
'SELECT content, timestamp FROM note '
'WHERE status = 1 ORDER BY timestamp DESC')
def tearDown(self):
self.database.execute_sql('DROP VIEW notes_public')
super(TestReflectViews, self).tearDown()
def test_views_ignored_default(self):
models = self.introspector.generate_models()
self.assertFalse('notes_public' in models)
def test_introspect_view(self):
models = self.introspector.generate_models(include_views=True)
self.assertTrue('notes_public' in models)
NotesPublic = models['notes_public']
self.assertEqual(sorted(NotesPublic._meta.fields),
['content', 'timestamp'])
self.assertTrue(isinstance(NotesPublic.content, TextField))
self.assertTrue(isinstance(NotesPublic.timestamp, DateTimeField))
@skip_if(IS_SQLITE_OLD)
def test_introspect_view_integration(self):
for i, (ct, st) in enumerate([('n1', 1), ('n2', 2), ('n3', 1)]):
Note.create(content=ct, status=st,
timestamp=datetime.datetime(2018, 1, 1 + i))
NP = self.introspector.generate_models(
table_names=['notes_public'], include_views=True)['notes_public']
self.assertEqual([(np.content, np.timestamp) for np in NP.select()], [
('n3', datetime.datetime(2018, 1, 3)),
('n1', datetime.datetime(2018, 1, 1))])
class Event(TestModel):
key = TextField()
timestamp = DateTimeField(index=True)
metadata = TextField(default='')
class TestInteractiveHelpers(ModelTestCase):
requires = [Category, Event]
def test_generate_models(self):
M = generate_models(self.database)
self.assertTrue('category' in M)
self.assertTrue('event' in M)
def assertFields(m, expected):
actual = [(f.name, f.field_type) for f in m._meta.sorted_fields]
self.assertEqual(actual, expected)
assertFields(M['category'], [('id', 'AUTO'), ('name', 'VARCHAR'),
('parent', 'INT')])
assertFields(M['event'], [
('id', 'AUTO'),
('key', 'TEXT'),
('timestamp', 'DATETIME'),
('metadata', 'TEXT')])
| true
| true
|
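# Usage sketch: a minimal example of the playhouse.reflection API exercised by the
# tests above. Assumes peewee is installed and an existing SQLite file "app.db";
# the filename and printed layout are assumptions, not taken from the record.
from peewee import SqliteDatabase
from playhouse.reflection import Introspector, generate_models

db = SqliteDatabase("app.db")

# Build model classes from the existing schema; include_views mirrors TestReflectViews.
introspector = Introspector.from_database(db)
models = introspector.generate_models(include_views=True)

# generate_models() is the interactive helper shown in TestInteractiveHelpers.
for table_name, model in generate_models(db).items():
    print(table_name, [field.name for field in model._meta.sorted_fields])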
f7058f13d20f3f1ca760719af81ddcb6f2a11b08
| 2,334
|
py
|
Python
|
robosuite/models/robots/panda_robot.py
|
StanfordVL/Lasersuite
|
8b78c3d202f2a4b8712c5f228feaf5fae61f16e9
|
[
"MIT"
] | 5
|
2020-08-09T16:47:38.000Z
|
2021-05-06T05:43:12.000Z
|
robosuite/models/robots/panda_robot.py
|
StanfordVL/Lasersuite
|
8b78c3d202f2a4b8712c5f228feaf5fae61f16e9
|
[
"MIT"
] | 1
|
2020-11-06T06:31:08.000Z
|
2020-11-06T06:31:08.000Z
|
robosuite/models/robots/panda_robot.py
|
StanfordVL/Lasersuite
|
8b78c3d202f2a4b8712c5f228feaf5fae61f16e9
|
[
"MIT"
] | null | null | null |
import numpy as np
from .robot_model import RobotModel
from ...utils.mjcf_utils import xml_path_completion
class Panda(RobotModel):
"""Panda is a sensitive single-arm robot designed by Franka."""
def __init__(self, idn=0, bottom_offset=(0, 0, -0.913)):
"""
Args:
idn (int or str): Number or some other unique identification string for this robot instance
bottom_offset (3-list/tuple): x,y,z offset desired from initial coordinates
"""
super().__init__(xml_path_completion("robots/panda/robot.xml"), idn=idn, bottom_offset=bottom_offset)
# Set joint damping
self.set_joint_attribute(attrib="damping", values=np.array((0.1, 0.1, 0.1, 0.1, 0.1, 0.01, 0.01)))
@property
def dof(self):
return 7
@property
def gripper(self):
return "PandaGripper"
@property
def default_controller_config(self):
return "default_panda"
@property
def init_qpos(self):
return np.array([0, np.pi / 16.0, 0.00, -np.pi / 2.0 - np.pi / 3.0, 0.00, np.pi - 0.2, np.pi/4])
@property
def base_xpos_offset(self):
return {
"bins": (-0.5, 0.3, 0),
"empty": (-0.6, 0, 0),
"pegs": (-0.5, 0.15, 0),
"table": lambda table_length: (-0.16 - table_length / 2, 0, 0)
}
@property
def arm_type(self):
return "single"
@property
def _joints(self):
return ["joint1", "joint2", "joint3", "joint4", "joint5", "joint6", "joint7"]
@property
def _eef_name(self):
return "right_hand"
@property
def _robot_base(self):
return "base"
@property
def _actuators(self):
return {
"pos": [], # No position actuators for panda
"vel": [], # No velocity actuators for panda
"torq": ["torq_j1", "torq_j2", "torq_j3", "torq_j4", "torq_j5", "torq_j6", "torq_j7"]
}
@property
def _contact_geoms(self):
return ["link1_collision", "link2_collision", "link3_collision", "link4_collision",
"link5_collision", "link6_collision", "link7_collision"]
@property
def _root(self):
return "link0"
@property
def _links(self):
return ["link1", "link2", "link3", "link4", "link5", "link6", "link7"]
| 28.814815
| 109
| 0.582262
|
import numpy as np
from .robot_model import RobotModel
from ...utils.mjcf_utils import xml_path_completion
class Panda(RobotModel):
def __init__(self, idn=0, bottom_offset=(0, 0, -0.913)):
super().__init__(xml_path_completion("robots/panda/robot.xml"), idn=idn, bottom_offset=bottom_offset)
self.set_joint_attribute(attrib="damping", values=np.array((0.1, 0.1, 0.1, 0.1, 0.1, 0.01, 0.01)))
@property
def dof(self):
return 7
@property
def gripper(self):
return "PandaGripper"
@property
def default_controller_config(self):
return "default_panda"
@property
def init_qpos(self):
return np.array([0, np.pi / 16.0, 0.00, -np.pi / 2.0 - np.pi / 3.0, 0.00, np.pi - 0.2, np.pi/4])
@property
def base_xpos_offset(self):
return {
"bins": (-0.5, 0.3, 0),
"empty": (-0.6, 0, 0),
"pegs": (-0.5, 0.15, 0),
"table": lambda table_length: (-0.16 - table_length / 2, 0, 0)
}
@property
def arm_type(self):
return "single"
@property
def _joints(self):
return ["joint1", "joint2", "joint3", "joint4", "joint5", "joint6", "joint7"]
@property
def _eef_name(self):
return "right_hand"
@property
def _robot_base(self):
return "base"
@property
def _actuators(self):
return {
"pos": [],
"vel": [],
"torq": ["torq_j1", "torq_j2", "torq_j3", "torq_j4", "torq_j5", "torq_j6", "torq_j7"]
}
@property
def _contact_geoms(self):
return ["link1_collision", "link2_collision", "link3_collision", "link4_collision",
"link5_collision", "link6_collision", "link7_collision"]
@property
def _root(self):
return "link0"
@property
def _links(self):
return ["link1", "link2", "link3", "link4", "link5", "link6", "link7"]
| true
| true
|
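# Usage sketch: loading the Panda robot model defined in the record above. Assumes a
# robosuite/Lasersuite installation with its MJCF assets on disk; the import path
# mirrors the repository layout shown in the record and is an assumption here.
from robosuite.models.robots import Panda

robot = Panda(idn=0)
print(robot.dof)        # 7
print(robot.gripper)    # "PandaGripper"
print(robot.init_qpos)  # default joint configuration from init_qpos above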
f7058f6fa6f834bef960f8c52d0d2be8e352837f
| 11,340
|
py
|
Python
|
kubernetes_asyncio/client/models/v1beta1_cron_job_spec.py
|
lsst-sqre/kubernetes_asyncio
|
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1beta1_cron_job_spec.py
|
lsst-sqre/kubernetes_asyncio
|
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1beta1_cron_job_spec.py
|
lsst-sqre/kubernetes_asyncio
|
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.19.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1beta1CronJobSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'concurrency_policy': 'str',
'failed_jobs_history_limit': 'int',
'job_template': 'V1beta1JobTemplateSpec',
'schedule': 'str',
'starting_deadline_seconds': 'int',
'successful_jobs_history_limit': 'int',
'suspend': 'bool'
}
attribute_map = {
'concurrency_policy': 'concurrencyPolicy',
'failed_jobs_history_limit': 'failedJobsHistoryLimit',
'job_template': 'jobTemplate',
'schedule': 'schedule',
'starting_deadline_seconds': 'startingDeadlineSeconds',
'successful_jobs_history_limit': 'successfulJobsHistoryLimit',
'suspend': 'suspend'
}
def __init__(self, concurrency_policy=None, failed_jobs_history_limit=None, job_template=None, schedule=None, starting_deadline_seconds=None, successful_jobs_history_limit=None, suspend=None, local_vars_configuration=None): # noqa: E501
"""V1beta1CronJobSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._concurrency_policy = None
self._failed_jobs_history_limit = None
self._job_template = None
self._schedule = None
self._starting_deadline_seconds = None
self._successful_jobs_history_limit = None
self._suspend = None
self.discriminator = None
if concurrency_policy is not None:
self.concurrency_policy = concurrency_policy
if failed_jobs_history_limit is not None:
self.failed_jobs_history_limit = failed_jobs_history_limit
self.job_template = job_template
self.schedule = schedule
if starting_deadline_seconds is not None:
self.starting_deadline_seconds = starting_deadline_seconds
if successful_jobs_history_limit is not None:
self.successful_jobs_history_limit = successful_jobs_history_limit
if suspend is not None:
self.suspend = suspend
@property
def concurrency_policy(self):
"""Gets the concurrency_policy of this V1beta1CronJobSpec. # noqa: E501
Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one # noqa: E501
:return: The concurrency_policy of this V1beta1CronJobSpec. # noqa: E501
:rtype: str
"""
return self._concurrency_policy
@concurrency_policy.setter
def concurrency_policy(self, concurrency_policy):
"""Sets the concurrency_policy of this V1beta1CronJobSpec.
Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one # noqa: E501
:param concurrency_policy: The concurrency_policy of this V1beta1CronJobSpec. # noqa: E501
:type: str
"""
self._concurrency_policy = concurrency_policy
@property
def failed_jobs_history_limit(self):
"""Gets the failed_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. # noqa: E501
:return: The failed_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
:rtype: int
"""
return self._failed_jobs_history_limit
@failed_jobs_history_limit.setter
def failed_jobs_history_limit(self, failed_jobs_history_limit):
"""Sets the failed_jobs_history_limit of this V1beta1CronJobSpec.
The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. # noqa: E501
:param failed_jobs_history_limit: The failed_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
:type: int
"""
self._failed_jobs_history_limit = failed_jobs_history_limit
@property
def job_template(self):
"""Gets the job_template of this V1beta1CronJobSpec. # noqa: E501
:return: The job_template of this V1beta1CronJobSpec. # noqa: E501
:rtype: V1beta1JobTemplateSpec
"""
return self._job_template
@job_template.setter
def job_template(self, job_template):
"""Sets the job_template of this V1beta1CronJobSpec.
:param job_template: The job_template of this V1beta1CronJobSpec. # noqa: E501
:type: V1beta1JobTemplateSpec
"""
if self.local_vars_configuration.client_side_validation and job_template is None: # noqa: E501
raise ValueError("Invalid value for `job_template`, must not be `None`") # noqa: E501
self._job_template = job_template
@property
def schedule(self):
"""Gets the schedule of this V1beta1CronJobSpec. # noqa: E501
The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. # noqa: E501
:return: The schedule of this V1beta1CronJobSpec. # noqa: E501
:rtype: str
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this V1beta1CronJobSpec.
The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. # noqa: E501
:param schedule: The schedule of this V1beta1CronJobSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and schedule is None: # noqa: E501
raise ValueError("Invalid value for `schedule`, must not be `None`") # noqa: E501
self._schedule = schedule
@property
def starting_deadline_seconds(self):
"""Gets the starting_deadline_seconds of this V1beta1CronJobSpec. # noqa: E501
        Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed job executions will be counted as failed ones. # noqa: E501
:return: The starting_deadline_seconds of this V1beta1CronJobSpec. # noqa: E501
:rtype: int
"""
return self._starting_deadline_seconds
@starting_deadline_seconds.setter
def starting_deadline_seconds(self, starting_deadline_seconds):
"""Sets the starting_deadline_seconds of this V1beta1CronJobSpec.
        Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed job executions will be counted as failed ones. # noqa: E501
:param starting_deadline_seconds: The starting_deadline_seconds of this V1beta1CronJobSpec. # noqa: E501
:type: int
"""
self._starting_deadline_seconds = starting_deadline_seconds
@property
def successful_jobs_history_limit(self):
"""Gets the successful_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3. # noqa: E501
:return: The successful_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
:rtype: int
"""
return self._successful_jobs_history_limit
@successful_jobs_history_limit.setter
def successful_jobs_history_limit(self, successful_jobs_history_limit):
"""Sets the successful_jobs_history_limit of this V1beta1CronJobSpec.
The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3. # noqa: E501
:param successful_jobs_history_limit: The successful_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
:type: int
"""
self._successful_jobs_history_limit = successful_jobs_history_limit
@property
def suspend(self):
"""Gets the suspend of this V1beta1CronJobSpec. # noqa: E501
This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false. # noqa: E501
:return: The suspend of this V1beta1CronJobSpec. # noqa: E501
:rtype: bool
"""
return self._suspend
@suspend.setter
def suspend(self, suspend):
"""Sets the suspend of this V1beta1CronJobSpec.
This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false. # noqa: E501
:param suspend: The suspend of this V1beta1CronJobSpec. # noqa: E501
:type: bool
"""
self._suspend = suspend
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1CronJobSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1CronJobSpec):
return True
return self.to_dict() != other.to_dict()
| 38.969072
| 324
| 0.67134
|
import pprint
import re
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1beta1CronJobSpec(object):
openapi_types = {
'concurrency_policy': 'str',
'failed_jobs_history_limit': 'int',
'job_template': 'V1beta1JobTemplateSpec',
'schedule': 'str',
'starting_deadline_seconds': 'int',
'successful_jobs_history_limit': 'int',
'suspend': 'bool'
}
attribute_map = {
'concurrency_policy': 'concurrencyPolicy',
'failed_jobs_history_limit': 'failedJobsHistoryLimit',
'job_template': 'jobTemplate',
'schedule': 'schedule',
'starting_deadline_seconds': 'startingDeadlineSeconds',
'successful_jobs_history_limit': 'successfulJobsHistoryLimit',
'suspend': 'suspend'
}
def __init__(self, concurrency_policy=None, failed_jobs_history_limit=None, job_template=None, schedule=None, starting_deadline_seconds=None, successful_jobs_history_limit=None, suspend=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._concurrency_policy = None
self._failed_jobs_history_limit = None
self._job_template = None
self._schedule = None
self._starting_deadline_seconds = None
self._successful_jobs_history_limit = None
self._suspend = None
self.discriminator = None
if concurrency_policy is not None:
self.concurrency_policy = concurrency_policy
if failed_jobs_history_limit is not None:
self.failed_jobs_history_limit = failed_jobs_history_limit
self.job_template = job_template
self.schedule = schedule
if starting_deadline_seconds is not None:
self.starting_deadline_seconds = starting_deadline_seconds
if successful_jobs_history_limit is not None:
self.successful_jobs_history_limit = successful_jobs_history_limit
if suspend is not None:
self.suspend = suspend
@property
def concurrency_policy(self):
return self._concurrency_policy
@concurrency_policy.setter
def concurrency_policy(self, concurrency_policy):
self._concurrency_policy = concurrency_policy
@property
def failed_jobs_history_limit(self):
return self._failed_jobs_history_limit
@failed_jobs_history_limit.setter
def failed_jobs_history_limit(self, failed_jobs_history_limit):
self._failed_jobs_history_limit = failed_jobs_history_limit
@property
def job_template(self):
return self._job_template
@job_template.setter
def job_template(self, job_template):
if self.local_vars_configuration.client_side_validation and job_template is None:
raise ValueError("Invalid value for `job_template`, must not be `None`")
self._job_template = job_template
@property
def schedule(self):
return self._schedule
@schedule.setter
def schedule(self, schedule):
if self.local_vars_configuration.client_side_validation and schedule is None:
raise ValueError("Invalid value for `schedule`, must not be `None`")
self._schedule = schedule
@property
def starting_deadline_seconds(self):
return self._starting_deadline_seconds
@starting_deadline_seconds.setter
def starting_deadline_seconds(self, starting_deadline_seconds):
self._starting_deadline_seconds = starting_deadline_seconds
@property
def successful_jobs_history_limit(self):
return self._successful_jobs_history_limit
@successful_jobs_history_limit.setter
def successful_jobs_history_limit(self, successful_jobs_history_limit):
self._successful_jobs_history_limit = successful_jobs_history_limit
@property
def suspend(self):
return self._suspend
@suspend.setter
def suspend(self, suspend):
self._suspend = suspend
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1beta1CronJobSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, V1beta1CronJobSpec):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
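# Usage sketch: constructing the generated V1beta1CronJobSpec model from the record
# above. Assumes kubernetes_asyncio is installed; the schedule and policy values are
# arbitrary examples.
from kubernetes_asyncio.client import V1beta1CronJobSpec, V1beta1JobTemplateSpec

spec = V1beta1CronJobSpec(
    schedule="*/5 * * * *",                 # required: client-side validation rejects None
    job_template=V1beta1JobTemplateSpec(),  # required: normally carries a pod template
    concurrency_policy="Forbid",
    successful_jobs_history_limit=3,
    suspend=False,
)
print(spec.to_dict())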
f705902ea07b23776e92a0707bf931cd4228f88f
| 489
|
py
|
Python
|
src/doing/pr/open_pr.py
|
tijlk/doing-cli
|
048e04aaa9f3f99f1a8013fe4cd8a488180f5f2b
|
[
"MIT"
] | 12
|
2021-03-23T13:27:33.000Z
|
2022-02-20T05:44:56.000Z
|
src/doing/pr/open_pr.py
|
tijlk/doing-cli
|
048e04aaa9f3f99f1a8013fe4cd8a488180f5f2b
|
[
"MIT"
] | 63
|
2021-03-23T12:54:00.000Z
|
2022-02-24T15:12:45.000Z
|
src/doing/pr/open_pr.py
|
tijlk/doing-cli
|
048e04aaa9f3f99f1a8013fe4cd8a488180f5f2b
|
[
"MIT"
] | 3
|
2021-04-09T14:40:23.000Z
|
2021-07-15T13:26:40.000Z
|
import click
from doing.utils import get_config
from doing.utils import get_repo_name
from typing import Union
def cmd_open_pr(pullrequest_id: Union[str, int]) -> None:
"""
Open a specific PULLREQUEST_ID. '!' prefix is allowed.
"""
pullrequest_id = str(pullrequest_id).lstrip("!").strip()
project = get_config("project")
organization = get_config("organization")
click.launch(f"{organization}/{project}/_git/{get_repo_name()}/pullrequest/{pullrequest_id}")
| 28.764706
| 97
| 0.721881
|
import click
from doing.utils import get_config
from doing.utils import get_repo_name
from typing import Union
def cmd_open_pr(pullrequest_id: Union[str, int]) -> None:
pullrequest_id = str(pullrequest_id).lstrip("!").strip()
project = get_config("project")
organization = get_config("organization")
click.launch(f"{organization}/{project}/_git/{get_repo_name()}/pullrequest/{pullrequest_id}")
| true
| true
|
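# Usage sketch: the Azure DevOps URL that cmd_open_pr builds before click.launch().
# The organization/project/repository values below are hypothetical placeholders,
# not values from the record.
organization = "https://dev.azure.com/contoso"
project = "tooling"
repo_name = "doing-cli"
pullrequest_id = "!123".lstrip("!").strip()

url = f"{organization}/{project}/_git/{repo_name}/pullrequest/{pullrequest_id}"
print(url)  # https://dev.azure.com/contoso/tooling/_git/doing-cli/pullrequest/123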
f70591035fd509c27b46a06d617bc227adafc45a
| 332
|
py
|
Python
|
time_management/facade_abc.py
|
artorias111/time-management
|
c79c31e070447e70bd3a54e2ad77d88d9821ac2e
|
[
"MIT"
] | null | null | null |
time_management/facade_abc.py
|
artorias111/time-management
|
c79c31e070447e70bd3a54e2ad77d88d9821ac2e
|
[
"MIT"
] | null | null | null |
time_management/facade_abc.py
|
artorias111/time-management
|
c79c31e070447e70bd3a54e2ad77d88d9821ac2e
|
[
"MIT"
] | null | null | null |
from abc import ABC
class AbcFacade(ABC):
"""Any interface will expect to be able to invoke the following methods."""
def count_rows(self):
pass
def get_rows(self):
pass
def get_last_workday(self):
pass
def delete_history(self):
pass
def disconnect(self):
pass
| 15.809524
| 79
| 0.611446
|
from abc import ABC
class AbcFacade(ABC):
def count_rows(self):
pass
def get_rows(self):
pass
def get_last_workday(self):
pass
def delete_history(self):
pass
def disconnect(self):
pass
| true
| true
|
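# Usage sketch: a minimal in-memory implementation of the AbcFacade interface from the
# record above. The import path and the list-backed storage are assumptions.
from time_management.facade_abc import AbcFacade


class InMemoryFacade(AbcFacade):
    def __init__(self, rows=None):
        self._rows = list(rows or [])

    def count_rows(self):
        return len(self._rows)

    def get_rows(self):
        return list(self._rows)

    def get_last_workday(self):
        return self._rows[-1] if self._rows else None

    def delete_history(self):
        self._rows.clear()

    def disconnect(self):
        pass  # nothing to release for the in-memory backend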
f7059112109560001ae224464bc42dacbe27b49c
| 29,512
|
py
|
Python
|
Virtual-Environment/lib/python3.7/site-packages/rich/pretty.py
|
jguev/instant-insanity
|
98894a228d20e7abc5c6d123772aa8cbdaefd372
|
[
"MIT"
] | 2
|
2020-12-14T21:02:54.000Z
|
2021-12-25T05:49:28.000Z
|
Virtual-Environment/lib/python3.7/site-packages/rich/pretty.py
|
jguev/instant-insanity
|
98894a228d20e7abc5c6d123772aa8cbdaefd372
|
[
"MIT"
] | 4
|
2021-11-11T10:23:35.000Z
|
2021-12-01T10:28:30.000Z
|
Virtual-Environment/lib/python3.7/site-packages/rich/pretty.py
|
jguev/instant-insanity
|
98894a228d20e7abc5c6d123772aa8cbdaefd372
|
[
"MIT"
] | null | null | null |
import builtins
import os
from rich.repr import RichReprResult
import sys
from array import array
from collections import Counter, defaultdict, deque, UserDict, UserList
import dataclasses
from dataclasses import dataclass, fields, is_dataclass
from inspect import isclass
from itertools import islice
import re
from typing import (
DefaultDict,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from types import MappingProxyType
try:
import attr as _attr_module
except ImportError: # pragma: no cover
_attr_module = None # type: ignore
from .highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
# Matches Jupyter's special methods
_re_jupyter_repr = re.compile(f"^_repr_.+_$")
def _is_attr_object(obj: Any) -> bool:
"""Check if an object was created with attrs module."""
return _attr_module is not None and _attr_module.has(type(obj))
def _get_attr_fields(obj: Any) -> Iterable["_attr_module.Attribute[Any]"]:
"""Get fields for an attrs object."""
return _attr_module.fields(type(obj)) if _attr_module is not None else []
def _is_dataclass_repr(obj: object) -> bool:
"""Check if an instance of a dataclass contains the default repr.
Args:
obj (object): A dataclass instance.
Returns:
bool: True if the default repr is used, False if there is a custom repr.
"""
# Digging in to a lot of internals here
# Catching all exceptions in case something is missing on a non CPython implementation
try:
return obj.__repr__.__code__.co_filename == dataclasses.__file__
except Exception: # pragma: no coverage
return False
def install(
console: Optional["Console"] = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on jupyter rich display, if using one of the special representations don't use rich
if console.is_jupyter and any(
_re_jupyter_repr.match(attr) for attr in dir(value)
):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
new_line_start=True,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
class RichFormatter(BaseFormatter): # type: ignore
pprint: bool = True
def __call__(self, value: Any) -> Any:
if self.pprint:
return ipy_display_hook(value)
else:
return repr(value)
# replace plain text formatter with rich formatter
rich_formatter = RichFormatter()
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
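# Usage sketch (comment only; assumes rich is installed as a package): the function
# above is normally invoked once at REPL start-up:
#   >>> from rich import pretty
#   >>> pretty.install(indent_guides=True, max_length=10)
#   >>> ["hello", {"key": "value"}]   # subsequent results are pretty printed by Rich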
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
        margin (int, optional): Subtract a margin from width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: Optional["HighlighterType"] = None,
*,
indent_size: int = 4,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify: Optional["JustifyMethod"] = justify
self.overflow: Optional["OverflowMethod"] = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
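# Usage sketch (comment only; assumes a rich Console is available): rendering an object
# with the class above:
#   >>> from rich.console import Console
#   >>> from rich.pretty import Pretty
#   >>> Console().print(Pretty(list(range(30)), max_length=5, indent_guides=True))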
def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
    return (f"array({_object.typecode!r}, [", "])", f"array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
UserDict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
UserList: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj))
or (hasattr(obj, "__rich_repr__"))
or _is_attr_object(obj)
) and not isclass(obj)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
separator: str = ", "
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield self.separator
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
"""A line in repr output."""
parent: Optional["_Line"] = None
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
last: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
new_line = yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for last, child in loop_last(node.children):
separator = "," if tuple_of_one else node.separator
line = _Line(
parent=new_line,
node=child,
whitespace=child_whitespace,
suffix=separator,
last=last and not tuple_of_one,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix=self.suffix,
last=self.last,
)
def __str__(self) -> str:
if self.last:
return f"{self.whitespace}{self.text}{self.node or ''}"
else:
return (
f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
)
def traverse(
_object: Any, max_length: Optional[int] = None, max_string: Optional[int] = None
) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error {str(error)!r}>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
try:
fake_attributes = hasattr(
obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
)
except Exception:
fake_attributes = False
rich_repr_result: Optional[RichReprResult] = None
if not fake_attributes:
try:
if hasattr(obj, "__rich_repr__") and not isclass(obj):
rich_repr_result = obj.__rich_repr__()
except Exception:
pass
if rich_repr_result is not None:
angular = getattr(obj.__rich_repr__, "angular", False)
args = list(iter_rich_args(rich_repr_result))
class_name = obj.__class__.__name__
if args:
children = []
append = children.append
if angular:
node = Node(
open_brace=f"<{class_name} ",
close_brace=">",
children=children,
last=root,
separator=" ",
)
else:
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"<{class_name}>" if angular else f"{class_name}()",
children=[],
last=root,
)
elif _is_attr_object(obj) and not fake_attributes:
children = []
append = children.append
attr_fields = _get_attr_fields(obj)
if attr_fields:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
def iter_attrs() -> Iterable[
Tuple[str, Any, Optional[Callable[[Any], str]]]
]:
"""Iterate over attr fields and values."""
for attr in attr_fields:
if attr.repr:
try:
value = getattr(obj, attr.name)
except Exception as error:
# Can happen, albeit rarely
yield (attr.name, error, None)
else:
yield (
attr.name,
value,
attr.repr if callable(attr.repr) else None,
)
for last, (name, value, repr_callable) in loop_last(iter_attrs()):
if repr_callable:
child_node = Node(value_repr=str(repr_callable(value)))
else:
child_node = _traverse(value)
child_node.last = last
child_node.key_repr = name
child_node.key_separator = "="
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and not fake_attributes
and (_is_dataclass_repr(obj) or py_version == (3, 6))
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(field for field in fields(obj) if field.repr):
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif isinstance(obj, _CONTAINERS):
for container_type in _CONTAINERS:
if isinstance(obj, container_type):
obj_type = container_type
break
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj_type.__repr__ != type(obj).__repr__:
node = Node(value_repr=to_repr(obj), last=root)
elif obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
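# A quick usage sketch for pretty_repr (indicative output, not byte-exact):
#
#     >>> print(pretty_repr({"name": "Ada", "scores": [1, 2, 3]}, max_width=30))
#     {
#         'name': 'Ada',
#         'scores': [1, 2, 3]
#     }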
def pprint(
_object: Any,
*,
console: Optional["Console"] = None,
indent_guides: bool = True,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
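# e.g. pprint(locals(), max_length=10) pretty prints the current namespace and
# abbreviates any container longer than 10 items.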
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self) -> str:
1 / 0
return "this will fail"
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
| 35.386091
| 134
| 0.557366
|
import builtins
import os
from rich.repr import RichReprResult
import sys
from array import array
from collections import Counter, defaultdict, deque, UserDict, UserList
import dataclasses
from dataclasses import dataclass, fields, is_dataclass
from inspect import isclass
from itertools import islice
import re
from typing import (
DefaultDict,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from types import MappingProxyType
try:
import attr as _attr_module
except ImportError:
_attr_module = None
from .highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
_re_jupyter_repr = re.compile(f"^_repr_.+_$")
def _is_attr_object(obj: Any) -> bool:
return _attr_module is not None and _attr_module.has(type(obj))
def _get_attr_fields(obj: Any) -> Iterable["_attr_module.Attribute[Any]"]:
return _attr_module.fields(type(obj)) if _attr_module is not None else []
def _is_dataclass_repr(obj: object) -> bool:
# Digging in to a lot of internals here
# Catching all exceptions in case something is missing on a non CPython implementation
try:
return obj.__repr__.__code__.co_filename == dataclasses.__file__
except Exception: # pragma: no coverage
return False
def install(
console: Optional["Console"] = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on jupyter rich display, if using one of the special representations don't use rich
if console.is_jupyter and any(
_re_jupyter_repr.match(attr) for attr in dir(value)
):
return
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
new_line_start=True,
)
try:
ip = get_ipython()
from IPython.core.formatters import BaseFormatter
class RichFormatter(BaseFormatter):
pprint: bool = True
def __call__(self, value: Any) -> Any:
if self.pprint:
return ipy_display_hook(value)
else:
return repr(value)
rich_formatter = RichFormatter()
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
class Pretty(JupyterMixin):
def __init__(
self,
_object: Any,
highlighter: Optional["HighlighterType"] = None,
*,
indent_size: int = 4,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify: Optional["JustifyMethod"] = justify
self.overflow: Optional["OverflowMethod"] = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
UserDict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
UserList: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
def is_expandable(obj: Any) -> bool:
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj))
or (hasattr(obj, "__rich_repr__"))
or _is_attr_object(obj)
) and not isclass(obj)
@dataclass
class Node:
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
separator: str = ", "
def iter_tokens(self) -> Iterable[str]:
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield self.separator
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
parent: Optional["_Line"] = None
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
last: bool = False
@property
def expandable(self) -> bool:
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
new_line = yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for last, child in loop_last(node.children):
separator = "," if tuple_of_one else node.separator
line = _Line(
parent=new_line,
node=child,
whitespace=child_whitespace,
suffix=separator,
last=last and not tuple_of_one,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix=self.suffix,
last=self.last,
)
def __str__(self) -> str:
if self.last:
return f"{self.whitespace}{self.text}{self.node or ''}"
else:
return (
f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
)
def traverse(
_object: Any, max_length: Optional[int] = None, max_string: Optional[int] = None
) -> Node:
def to_repr(obj: Any) -> str:
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error {str(error)!r}>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
try:
fake_attributes = hasattr(
obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
)
except Exception:
fake_attributes = False
rich_repr_result: Optional[RichReprResult] = None
if not fake_attributes:
try:
if hasattr(obj, "__rich_repr__") and not isclass(obj):
rich_repr_result = obj.__rich_repr__()
except Exception:
pass
if rich_repr_result is not None:
angular = getattr(obj.__rich_repr__, "angular", False)
args = list(iter_rich_args(rich_repr_result))
class_name = obj.__class__.__name__
if args:
children = []
append = children.append
if angular:
node = Node(
open_brace=f"<{class_name} ",
close_brace=">",
children=children,
last=root,
separator=" ",
)
else:
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"<{class_name}>" if angular else f"{class_name}()",
children=[],
last=root,
)
elif _is_attr_object(obj) and not fake_attributes:
children = []
append = children.append
attr_fields = _get_attr_fields(obj)
if attr_fields:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
def iter_attrs() -> Iterable[
Tuple[str, Any, Optional[Callable[[Any], str]]]
]:
"""Iterate over attr fields and values."""
for attr in attr_fields:
if attr.repr:
try:
value = getattr(obj, attr.name)
except Exception as error:
yield (attr.name, error, None)
else:
yield (
attr.name,
value,
attr.repr if callable(attr.repr) else None,
)
for last, (name, value, repr_callable) in loop_last(iter_attrs()):
if repr_callable:
child_node = Node(value_repr=str(repr_callable(value)))
else:
child_node = _traverse(value)
child_node.last = last
child_node.key_repr = name
child_node.key_separator = "="
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and not fake_attributes
and (_is_dataclass_repr(obj) or py_version == (3, 6))
):
obj_id = id(obj)
if obj_id in visited_ids:
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(field for field in fields(obj) if field.repr):
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif isinstance(obj, _CONTAINERS):
for container_type in _CONTAINERS:
if isinstance(obj, container_type):
obj_type = container_type
break
obj_id = id(obj)
if obj_id in visited_ids:
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj_type.__repr__ != type(obj).__repr__:
node = Node(value_repr=to_repr(obj), last=root)
elif obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> str:
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: Optional["Console"] = None,
indent_guides: bool = True,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__":
class BrokenRepr:
def __repr__(self) -> str:
1 / 0
return "this will fail"
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data)
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
| true
| true
|
f7059188ba5afe67da1d44238af516ec78351d12
| 2,742
|
py
|
Python
|
tensorflow_gan/examples/stargan_estimator/train_test.py
|
jiasenwu/gan
|
f92aeca269365180125d4e4c57c53cbf5e679299
|
[
"Apache-2.0"
] | 1
|
2020-07-30T12:33:56.000Z
|
2020-07-30T12:33:56.000Z
|
tensorflow_gan/examples/stargan_estimator/train_test.py
|
jiasenwu/gan
|
f92aeca269365180125d4e4c57c53cbf5e679299
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_gan/examples/stargan_estimator/train_test.py
|
jiasenwu/gan
|
f92aeca269365180125d4e4c57c53cbf5e679299
|
[
"Apache-2.0"
] | 1
|
2021-05-31T23:19:44.000Z
|
2021-05-31T23:19:44.000Z
|
# coding=utf-8
# Copyright 2019 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stargan_estimator.train."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_gan.examples.stargan_estimator import train_lib
mock = tf.compat.v1.test.mock
def _test_generator(input_images, _):
"""Simple generator function."""
return input_images * tf.compat.v1.get_variable('dummy_g', initializer=2.0)
def _test_discriminator(inputs, num_domains):
"""Differentiable dummy discriminator for StarGAN."""
hidden = tf.compat.v1.layers.flatten(inputs)
output_src = tf.reduce_mean(input_tensor=hidden, axis=1)
output_cls = tf.compat.v1.layers.dense(inputs=hidden, units=num_domains)
return output_src, output_cls
class TrainTest(tf.test.TestCase):
@mock.patch.object(train_lib.data_provider, 'provide_data', autospec=True)
@mock.patch.object(
train_lib.data_provider, 'provide_celeba_test_set', autospec=True)
def test_main(self, mock_provide_celeba_test_set, mock_provide_data):
hparams = train_lib.HParams(
batch_size=1,
patch_size=8,
output_dir='/tmp/tfgan_logdir/stargan/',
generator_lr=1e-4,
discriminator_lr=1e-4,
max_number_of_steps=0,
steps_per_eval=1,
adam_beta1=0.5,
adam_beta2=0.999,
gen_disc_step_ratio=0.2,
master='',
ps_tasks=0,
task=0)
num_domains = 3
# Construct mock inputs.
images_shape = [
hparams.batch_size, hparams.patch_size, hparams.patch_size, 3
]
img_list = [np.zeros(images_shape, dtype=np.float32)] * num_domains
# Create a list of num_domains arrays of shape [batch_size, num_domains].
# Note: assumes hparams.batch_size <= num_domains.
lbl_list = [np.eye(num_domains)[:hparams.batch_size, :]] * num_domains
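    # e.g. with batch_size=1 and num_domains=3, each entry is [[1., 0., 0.]].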
mock_provide_data.return_value = (img_list, lbl_list)
mock_provide_celeba_test_set.return_value = np.zeros(
[3, hparams.patch_size, hparams.patch_size, 3])
train_lib.train(hparams, _test_generator, _test_discriminator)
if __name__ == '__main__':
tf.test.main()
| 33.439024
| 77
| 0.731947
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_gan.examples.stargan_estimator import train_lib
mock = tf.compat.v1.test.mock
def _test_generator(input_images, _):
return input_images * tf.compat.v1.get_variable('dummy_g', initializer=2.0)
def _test_discriminator(inputs, num_domains):
hidden = tf.compat.v1.layers.flatten(inputs)
output_src = tf.reduce_mean(input_tensor=hidden, axis=1)
output_cls = tf.compat.v1.layers.dense(inputs=hidden, units=num_domains)
return output_src, output_cls
class TrainTest(tf.test.TestCase):
@mock.patch.object(train_lib.data_provider, 'provide_data', autospec=True)
@mock.patch.object(
train_lib.data_provider, 'provide_celeba_test_set', autospec=True)
def test_main(self, mock_provide_celeba_test_set, mock_provide_data):
hparams = train_lib.HParams(
batch_size=1,
patch_size=8,
output_dir='/tmp/tfgan_logdir/stargan/',
generator_lr=1e-4,
discriminator_lr=1e-4,
max_number_of_steps=0,
steps_per_eval=1,
adam_beta1=0.5,
adam_beta2=0.999,
gen_disc_step_ratio=0.2,
master='',
ps_tasks=0,
task=0)
num_domains = 3
images_shape = [
hparams.batch_size, hparams.patch_size, hparams.patch_size, 3
]
img_list = [np.zeros(images_shape, dtype=np.float32)] * num_domains
lbl_list = [np.eye(num_domains)[:hparams.batch_size, :]] * num_domains
mock_provide_data.return_value = (img_list, lbl_list)
mock_provide_celeba_test_set.return_value = np.zeros(
[3, hparams.patch_size, hparams.patch_size, 3])
train_lib.train(hparams, _test_generator, _test_discriminator)
if __name__ == '__main__':
tf.test.main()
| true
| true
|
f70592ac90e928d0c5190de78accdb47db98dc6e
| 1,518
|
py
|
Python
|
app.py
|
aldinaufal21/qual-id
|
c3205256d1483831117b001e3929e5175aff78ee
|
[
"MIT"
] | null | null | null |
app.py
|
aldinaufal21/qual-id
|
c3205256d1483831117b001e3929e5175aff78ee
|
[
"MIT"
] | null | null | null |
app.py
|
aldinaufal21/qual-id
|
c3205256d1483831117b001e3929e5175aff78ee
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, jsonify, render_template, make_response
from qual_id.pattern import Pattern
import random
app = Flask(__name__)
@app.route('/get/', methods=['GET'])
def get_response():
pattern = Pattern(request.args.get("pattern", ""))
number = int(request.args.get("number", 1))
response_obj = {}
if not pattern.is_valid():
response_obj["error"] = "pattern is invalid"
else:
response_obj["data"] = get_qual_ids(pattern, number)
response = make_response(response_obj)
return response
@app.route('/categories/', methods=['GET'])
def categories_response():
response = {'data': Pattern.get_category_options()}
return jsonify(response)
@app.route('/badge-endpoint/', methods=['GET'])
def badge_endpoint_response():
example = get_qual_ids(Pattern('food-animal'), 1)[0]
response_obj = {
"schemaVersion": 1,
"label": "Qual ID",
"message": example,
"color": f"hsl({random.randint(0,359)}, 100%, 50%)"
}
response = make_response(response_obj)
response.headers['Cache-Control'] = 'no-cache, no-store'
return response
def get_qual_ids(pattern, number):
return [get_qual_id(pattern) for _ in range(number)]
def get_qual_id(pattern):
return '-'.join([path.get_random_value() for path in pattern.get_categories()])
@app.route('/')
def index():
return render_template('welcome.html')
if __name__ == '__main__':
# Threaded option to enable multiple instances for multiple user access support
app.run(threaded=True, port=5000)
| 25.3
| 81
| 0.700264
|
from flask import Flask, request, jsonify, render_template, make_response
from qual_id.pattern import Pattern
import random
app = Flask(__name__)
@app.route('/get/', methods=['GET'])
def get_response():
pattern = Pattern(request.args.get("pattern", ""))
number = int(request.args.get("number", 1))
response_obj = {}
if not pattern.is_valid():
response_obj["error"] = "pattern is invalid"
else:
response_obj["data"] = get_qual_ids(pattern, number)
response = make_response(response_obj)
return response
@app.route('/categories/', methods=['GET'])
def categories_response():
response = {'data': Pattern.get_category_options()}
return jsonify(response)
@app.route('/badge-endpoint/', methods=['GET'])
def badge_endpoint_response():
example = get_qual_ids(Pattern('food-animal'), 1)[0]
response_obj = {
"schemaVersion": 1,
"label": "Qual ID",
"message": example,
"color": f"hsl({random.randint(0,359)}, 100%, 50%)"
}
response = make_response(response_obj)
response.headers['Cache-Control'] = 'no-cache, no-store'
return response
def get_qual_ids(pattern, number):
return [get_qual_id(pattern) for _ in range(number)]
def get_qual_id(pattern):
return '-'.join([path.get_random_value() for path in pattern.get_categories()])
@app.route('/')
def index():
return render_template('welcome.html')
if __name__ == '__main__':
app.run(threaded=True, port=5000)
| true
| true
|
f70593151fa24ae167040805a1b24f6d3fdec51d
| 633
|
py
|
Python
|
app/main/forms.py
|
markmumba/personal-blog
|
c2a3a290f1d6ce847e2db4cba2f799b8292889f9
|
[
"MIT"
] | null | null | null |
app/main/forms.py
|
markmumba/personal-blog
|
c2a3a290f1d6ce847e2db4cba2f799b8292889f9
|
[
"MIT"
] | null | null | null |
app/main/forms.py
|
markmumba/personal-blog
|
c2a3a290f1d6ce847e2db4cba2f799b8292889f9
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms.validators import Required
from wtforms import TextAreaField,SubmitField,StringField
from ..models import User
class UpdateProfile(FlaskForm):
bio = TextAreaField('Update bio.',validators = [Required()])
submit = SubmitField('Update')
class PostAblog (FlaskForm):
title = StringField('Title',validators = [Required()])
content = TextAreaField('Start blogging',validators = [Required()])
submit = SubmitField('Blog')
class PostAComment (FlaskForm):
comment = TextAreaField(validators = [Required()])
submit = SubmitField('Comment',validators = [Required()])
| 35.166667
| 71
| 0.739336
|
from flask_wtf import FlaskForm
from wtforms.validators import Required
from wtforms import TextAreaField,SubmitField,StringField
from ..models import User
class UpdateProfile(FlaskForm):
bio = TextAreaField('Update bio.',validators = [Required()])
submit = SubmitField('Update')
class PostAblog (FlaskForm):
title = StringField('Title',validators = [Required()])
content = TextAreaField('Start blogging',validators = [Required()])
submit = SubmitField('Blog')
class PostAComment (FlaskForm):
comment = TextAreaField(validators = [Required()])
submit = SubmitField('Comment',validators = [Required()])
| true
| true
|
f705942f8f1cc804e3b4671a85ed097d24911237
| 27
|
py
|
Python
|
__init__.py
|
JDavidMoreno/meditative_cards
|
b935a422037c4f3ed076ce1bcd5bcdcbe24f1565
|
[
"MIT"
] | null | null | null |
__init__.py
|
JDavidMoreno/meditative_cards
|
b935a422037c4f3ed076ce1bcd5bcdcbe24f1565
|
[
"MIT"
] | null | null | null |
__init__.py
|
JDavidMoreno/meditative_cards
|
b935a422037c4f3ed076ce1bcd5bcdcbe24f1565
|
[
"MIT"
] | 1
|
2021-04-03T18:00:14.000Z
|
2021-04-03T18:00:14.000Z
|
from . import controllers
| 9
| 25
| 0.777778
|
from . import controllers
| true
| true
|
f70594d9ea97d88b1de224836e5a52dd96a783ea
| 6,523
|
py
|
Python
|
experimental/inject.py
|
LuisCerdenoMota/SHERLOCK
|
5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1
|
[
"MIT"
] | null | null | null |
experimental/inject.py
|
LuisCerdenoMota/SHERLOCK
|
5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1
|
[
"MIT"
] | null | null | null |
experimental/inject.py
|
LuisCerdenoMota/SHERLOCK
|
5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
#::: modules
import numpy as np
import os, sys
import ellc
from transitleastsquares import catalog_info
import astropy.constants as ac
import astropy.units as u
import lightkurve as lk
import pandas as pd
np.random.seed(42)
#::: load data and set the units correctly
TIC_ID = 85400193 # TIC_ID of our candidate
lcf= lk.search_lightcurvefile('TIC '+str(TIC_ID), mission="tess").download_all()
ab, mass, massmin, massmax, radius, radiusmin, radiusmax = catalog_info(TIC_ID=TIC_ID)
#units for ellc
rstar=radius*u.R_sun
mstar=mass*u.M_sun
#mass and radius for the TLS
#rstar=radius
#mstar=mass
mstar_min = mass-massmin
mstar_max = mass+massmax
rstar_min = radius-radiusmin
rstar_max = radius+radiusmax
#uncomment the following lines to check that the parameters used are correct.
#print('\n STELLAR PROPERTIES FOR THE SIGNAL SEARCH')
#print('================================================\n')
#print('limb-darkening estimates using quadratic LD (a,b)=', ab)
#print('mass =', format(mstar,'0.5f'))
#print('mass_min =', format(mstar_min,'0.5f'))
#print('mass_max =', format(mstar_max,'0.5f'))
#print('radius =', format(rstar,'0.5f'))
#print('radius_min =', format(rstar_min,'0.5f'))
#print('radius_max =', format(rstar_max,'0.5f'))
lc=lcf.PDCSAP_FLUX.stitch().remove_nans() # remove the nans
lc_new=lk.LightCurve(time=lc.time, flux=lc.flux,flux_err=lc.flux_err)
clean=lc_new.remove_outliers(sigma_lower=float('inf'), sigma_upper=3) #remove outliers over 3sigma
flux0=clean.flux
time=clean.time
flux_err = clean.flux_err
#period_maximum=(max(time)-min(time))/2.
#time, flux0 = np.genfromtxt('TESS_phot.csv', delimiter=',', unpack=True)
#rstar = 0.211257 * 41.46650444642 #in Rearth
#::: make model
def make_model(epoch, period, rplanet):
#a = (7.495e-6 * period**2)**(1./3.)*u.au #in AU
P1=period*u.day
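    #semi-major axis from Kepler's third law: a**3 = G*M_star*P1**2/(4*pi**2), then converted to AU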
a = np.cbrt((ac.G*mstar*P1**2)/(4*np.pi**2)).to(u.au)
#print("radius_1 =", rstar.to(u.au) / a) #star radius convert from AU to in units of a
#print("radius_2 =", rplanet.to(u.au) / a)
texpo=2./60./24.
#print("T_expo = ", texpo,"dy")
#tdur=t14(R_s=radius, M_s=mass,P=period,small_planet=False) #we define the typical duration of a small planet in this star
#print("transit_duration= ", tdur*24*60,"min" )
model = ellc.lc(
t_obs = time,
        radius_1 = rstar.to(u.au) / a, #star radius converted from AU into units of a
radius_2 = rplanet.to(u.au) / a, #convert from Rearth (equatorial) into AU and then into units of a
sbratio = 0,
incl = 90,
light_3 = 0,
t_zero = epoch,
period = period,
a = None,
q = 1e-6,
f_c = None, f_s = None,
ldc_1=[0.2755,0.5493], ldc_2 = None,
gdc_1 = None, gdc_2 = None,
didt = None,
domdt = None,
rotfac_1 = 1, rotfac_2 = 1,
hf_1 = 1.5, hf_2 = 1.5,
bfac_1 = None, bfac_2 = None,
heat_1 = None, heat_2 = None,
lambda_1 = None, lambda_2 = None,
vsini_1 = None, vsini_2 = None,
t_exp=texpo, n_int=None,
grid_1='default', grid_2='default',
ld_1='quad', ld_2=None,
shape_1='sphere', shape_2='sphere',
spots_1=None, spots_2=None,
exact_grav=False, verbose=1)
flux_t = flux0 + model - 1.
if model[0] > 0:
flux = flux_t
flux_err_model = flux_err
time_custom = time
else:
flux = []
time_custom = []
flux_err_model = []
return time_custom, flux, flux_err_model
#minutes=10
#print(len(time))
#print(min(time),max(time))
#bins=len(time)*2./minutes
#print(bins)
#bin_means, bin_edges, binnumber = stats.binned_statistic(time, flux, statistic='mean', bins=bins)
#bin_stds, _, _ = stats.binned_statistic(time, flux, statistic='std', bins=bins)
#bin_width = (bin_edges[1] - bin_edges[0])
#bin_centers = bin_edges[1:] - bin_width/2
#print('RMS PDCSAP flux (ppm): ',np.std(flux0[~np.isnan(flux0)])*1e6)
#print('RMS model (ppm): ',np.std(flux[~np.isnan(flux)])*1e6)
#print('RMS 10min bin detrended (ppm): ',np.std(bin_means[~np.isnan(bin_means)])*1e6)
#fig, (ax1,ax2,ax3) = plt.subplots(3, 1, figsize=(10,5), constrained_layout=True)
##ax1
#ax1.plot(time, flux0, linewidth=0.05 ,color='black', alpha=0.4)
##ax1.legend(bbox_to_anchor=(0.85, 0.95), loc=2, borderaxespad=0.,fontsize=8)
#ax1.set_ylabel("Normalized flux")
#ax1.set_xlim(1766,1769)
##ax2
#ax2.plot(time, flux0, linewidth=0.05 ,color='black', alpha=0.4)
##ax2.plot(time, model, linewidth=0.9 ,color='firebrick', alpha=1)
#ax2.errorbar(time, model, marker='.', markersize=2, color='firebrick', alpha=1, linestyle='none')
#ax2.set_ylabel("Normalized flux")
#ax2.set_xlim(1766,1769)
##ax3
#ax3.plot(time, flux, linewidth=0.1 ,color='teal', alpha=0.5)
#ax3.errorbar(bin_centers, bin_means, marker='.', markersize=4, color='darkorange', alpha=1, linestyle='none')
#ax3.set_ylabel("Normalized flux")
#ax3.set_xlabel("Time (days)")
#ax3.set_xlim(1766,1769)
#plt.savefig('model.png', dpi=200)
def logprint(*text):
# print(*text)
original = sys.stdout
with open( os.path.join('tls/'+'P = '+str(period)+' days, Rp = '+str(rplanet)+'.log'), 'a' ) as f:
sys.stdout = f
print(*text)
sys.stdout = original
#::: iterate through grid of periods and rplanet
dir = "/home/pozuelos/martin/curves"
if not os.path.isdir(dir):
os.mkdir(dir)
max_period = 10
min_period = 0.5
for period in np.arange(min_period, max_period, 0.5):
for t0 in np.arange(time[60], time[60] + period - 0.1, period / 5):
for rplanet in np.arange(4, 0.65, -0.1):
rplanet = np.around(rplanet, decimals=2)*u.R_earth
print('\n')
print('P = '+str(period)+' days, Rp = '+str(rplanet) + ", T0 = " + str(t0))
time_model, flux_model, flux_err_model = make_model(t0, period, rplanet)
file_name = os.path.join(dir + '/P' + str(period) + '_R' + str(rplanet.value) + '_' + str(t0) + '.csv')
lc_df = pd.DataFrame(columns=['#time', 'flux', 'flux_err'])
lc_df['#time'] = time_model
lc_df['flux'] = flux_model
lc_df['flux_err'] = flux_err_model
lc_df.to_csv(file_name, index=False)
| 38.146199
| 126
| 0.62226
|
from __future__ import print_function, division, absolute_import
import numpy as np
import os, sys
import ellc
from transitleastsquares import catalog_info
import astropy.constants as ac
import astropy.units as u
import lightkurve as lk
import pandas as pd
np.random.seed(42)
TIC_ID = 85400193
lcf= lk.search_lightcurvefile('TIC '+str(TIC_ID), mission="tess").download_all()
ab, mass, massmin, massmax, radius, radiusmin, radiusmax = catalog_info(TIC_ID=TIC_ID)
rstar=radius*u.R_sun
mstar=mass*u.M_sun
mstar_min = mass-massmin
mstar_max = mass+massmax
rstar_min = radius-radiusmin
rstar_max = radius+radiusmax
lc=lcf.PDCSAP_FLUX.stitch().remove_nans()
lc_new=lk.LightCurve(time=lc.time, flux=lc.flux,flux_err=lc.flux_err)
clean=lc_new.remove_outliers(sigma_lower=float('inf'), sigma_upper=3)
flux0=clean.flux
time=clean.time
flux_err = clean.flux_err
def make_model(epoch, period, rplanet):
    P1=period*u.day
    a = np.cbrt((ac.G*mstar*P1**2)/(4*np.pi**2)).to(u.au)
    texpo=2./60./24.
    model = ellc.lc(
        t_obs = time,
        radius_1 = rstar.to(u.au) / a,
radius_2 = rplanet.to(u.au) / a,
sbratio = 0,
incl = 90,
light_3 = 0,
t_zero = epoch,
period = period,
a = None,
q = 1e-6,
f_c = None, f_s = None,
ldc_1=[0.2755,0.5493], ldc_2 = None,
gdc_1 = None, gdc_2 = None,
didt = None,
domdt = None,
rotfac_1 = 1, rotfac_2 = 1,
hf_1 = 1.5, hf_2 = 1.5,
bfac_1 = None, bfac_2 = None,
heat_1 = None, heat_2 = None,
lambda_1 = None, lambda_2 = None,
vsini_1 = None, vsini_2 = None,
t_exp=texpo, n_int=None,
grid_1='default', grid_2='default',
ld_1='quad', ld_2=None,
shape_1='sphere', shape_2='sphere',
spots_1=None, spots_2=None,
exact_grav=False, verbose=1)
flux_t = flux0 + model - 1.
if model[0] > 0:
flux = flux_t
flux_err_model = flux_err
time_custom = time
else:
flux = []
time_custom = []
flux_err_model = []
return time_custom, flux, flux_err_model
def logprint(*text):
    original = sys.stdout
    with open( os.path.join('tls/'+'P = '+str(period)+' days, Rp = '+str(rplanet)+'.log'), 'a' ) as f:
sys.stdout = f
print(*text)
sys.stdout = original
dir = "/home/pozuelos/martin/curves"
if not os.path.isdir(dir):
os.mkdir(dir)
max_period = 10
min_period = 0.5
for period in np.arange(min_period, max_period, 0.5):
for t0 in np.arange(time[60], time[60] + period - 0.1, period / 5):
for rplanet in np.arange(4, 0.65, -0.1):
rplanet = np.around(rplanet, decimals=2)*u.R_earth
print('\n')
print('P = '+str(period)+' days, Rp = '+str(rplanet) + ", T0 = " + str(t0))
time_model, flux_model, flux_err_model = make_model(t0, period, rplanet)
file_name = os.path.join(dir + '/P' + str(period) + '_R' + str(rplanet.value) + '_' + str(t0) + '.csv')
lc_df = pd.DataFrame(columns=['#time', 'flux', 'flux_err'])
lc_df['#time'] = time_model
lc_df['flux'] = flux_model
lc_df['flux_err'] = flux_err_model
lc_df.to_csv(file_name, index=False)
| true
| true
|
f70596147d6743d8b4b0dc81565a51e99185ba66
| 43,146
|
py
|
Python
|
pymclevel/leveldb.py
|
bennettdc/MCEdit-Unified
|
90abfb170c65b877ac67193e717fa3a3ded635dd
|
[
"0BSD"
] | 237
|
2018-02-04T19:13:31.000Z
|
2022-03-26T03:06:07.000Z
|
pymclevel/leveldb.py
|
bennettdc/MCEdit-Unified
|
90abfb170c65b877ac67193e717fa3a3ded635dd
|
[
"0BSD"
] | 551
|
2015-01-01T02:36:53.000Z
|
2018-02-01T00:03:12.000Z
|
pymclevel/leveldb.py
|
bennettdc/MCEdit-Unified
|
90abfb170c65b877ac67193e717fa3a3ded635dd
|
[
"0BSD"
] | 97
|
2015-01-02T01:31:12.000Z
|
2018-01-22T05:37:47.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2012 Space Monkey, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
LevelDB Python interface via C-Types.
http://code.google.com/p/leveldb-py/
Missing still (but in progress):
* custom comparators, filter policies, caches
This interface requires nothing more than the leveldb shared object with
the C api being installed.
Now requires LevelDB 1.6 or newer.
For most usages, you are likely to only be interested in the "DB" and maybe
the "WriteBatch" classes for construction. The other classes are helper
classes that you may end up using as part of those two root classes.
* DBInterface - This class wraps a LevelDB. Created by either the DB or
MemoryDB constructors
* Iterator - this class is created by calls to DBInterface::iterator.
Supports range requests, seeking, prefix searching, etc
* WriteBatch - this class is a standalone object. You can perform writes
and deletes on it, but nothing happens to your database until you
write the writebatch to the database with DB::write
"""
__author__ = "JT Olds"
__email__ = "jt@spacemonkey.com"
import bisect
import ctypes
import ctypes.util
import weakref
import threading
from collections import namedtuple
import os
import sys
import platform
import directories
# Let's set up some logging.
import logging
log = logging.getLogger(__name__)
# Here we want to load the file corresponding to the current platform.
# So, let's check for that :)
try:
plat = sys.platform
if plat == 'linux2':
        # This library shall not be installed system wide, so let's take it from the directory where this module is if
        # we're running from source, or from the same directory alongside the Linux bundle file.
if getattr(sys, 'frozen', False):
searched = []
p = os.path.dirname(os.path.abspath(__file__))
            # When running from a bundle the .so shall be in '<program install directory>/<last part of p>'
# Let's try to find it without taking care of the name of the bundle file.
b_dir, so_dir = os.path.split(p)
b_dir = os.path.split(b_dir)[0]
pth = None
while pth is None and b_dir != '/':
_p = os.path.join(b_dir, so_dir)
if os.path.exists(os.path.join(_p, 'libleveldb.so')):
pth = _p
else:
searched.append(_p)
b_dir = os.path.split(b_dir)[0]
if pth is None:
raise IOError("File 'libleveldb.so' not found in any of these places:\n%s" % '\n'.join(searched))
else:
log.info("Found 'libleveldb.so' in %s"%pth)
else:
pth = os.path.dirname(os.path.abspath(__file__))
_ldb = ctypes.CDLL(os.path.join(pth, 'libleveldb.so'))
elif plat == 'darwin':
        # since on OSX the program is bundled in a .app archive, shall we use the same (or a similar) technique as for Linux?
_ldb = ctypes.CDLL(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libleveldb.dylib'))
elif plat == 'win32':
if getattr(sys, '_MEIPASS', False):
import win32api
win32api.SetDllDirectory(sys._MEIPASS)
DLL_NAME = 'LevelDB-MCPE-32bit.dll'
if platform.architecture()[0] == '64bit' or sys.maxsize > 2**32:
DLL_NAME = 'LevelDB-MCPE-64bit.dll'
#_ldb = ctypes.CDLL(os.path.join(os.path.dirname(os.path.abspath(__file__)), "LevelDB-MCPE.dll"))
_ldb = ctypes.CDLL(str(directories.getDataFile('pymclevel', DLL_NAME)))
log.debug("Binary support v%s.%s for PE 1+ world succesfully loaded." % (_ldb.leveldb_major_version(), _ldb.leveldb_minor_version()))
except Exception as e:
    # What shall we do if the library is not found?
    # If the library is not loaded, the _ldb object does not exist, and every call to it will crash MCEdit...
    # We may import this module using a try/except statement.
log.error("The binary support for PE 1+ worlds could not be loaded:")
log.error(e)
raise e
_ldb.leveldb_filterpolicy_create_bloom.argtypes = [ctypes.c_int]
_ldb.leveldb_filterpolicy_create_bloom.restype = ctypes.c_void_p
_ldb.leveldb_filterpolicy_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_filterpolicy_destroy.restype = None
_ldb.leveldb_cache_create_lru.argtypes = [ctypes.c_size_t]
_ldb.leveldb_cache_create_lru.restype = ctypes.c_void_p
_ldb.leveldb_cache_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_cache_destroy.restype = None
_ldb.leveldb_options_create.argtypes = []
_ldb.leveldb_options_create.restype = ctypes.c_void_p
_ldb.leveldb_options_set_filter_policy.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
_ldb.leveldb_options_set_filter_policy.restype = None
_ldb.leveldb_options_set_create_if_missing.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_options_set_create_if_missing.restype = None
_ldb.leveldb_options_set_error_if_exists.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_options_set_error_if_exists.restype = None
_ldb.leveldb_options_set_paranoid_checks.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_options_set_paranoid_checks.restype = None
_ldb.leveldb_options_set_write_buffer_size.argtypes = [ctypes.c_void_p,
ctypes.c_size_t]
_ldb.leveldb_options_set_write_buffer_size.restype = None
_ldb.leveldb_options_set_max_open_files.argtypes = [ctypes.c_void_p,
ctypes.c_int]
_ldb.leveldb_options_set_max_open_files.restype = None
_ldb.leveldb_options_set_cache.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_options_set_cache.restype = None
_ldb.leveldb_options_set_block_size.argtypes = [ctypes.c_void_p,
ctypes.c_size_t]
_ldb.leveldb_options_set_block_size.restype = None
_ldb.leveldb_options_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_options_destroy.restype = None
_ldb.leveldb_options_set_compression.argtypes = [ctypes.c_void_p, ctypes.c_int]
_ldb.leveldb_options_set_compression.restype = None
try:
# options obj, index, compressor obj, error checker pointer
_ldb.leveldb_options_set_compressor.argtypes = [ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int]
_ldb.leveldb_options_set_compressor.restype = None
except Exception as exc:
log.debug("ERROR: leveldb::Options.compressors interface could not be accessed:")
log.debug("%s" % exc)
_ldb.leveldb_open.argtypes = [ctypes.c_void_p, ctypes.c_char_p,
ctypes.c_void_p]
_ldb.leveldb_open.restype = ctypes.c_void_p
_ldb.leveldb_close.argtypes = [ctypes.c_void_p]
_ldb.leveldb_close.restype = None
_ldb.leveldb_put.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t,
ctypes.c_void_p]
_ldb.leveldb_put.restype = None
_ldb.leveldb_delete.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
_ldb.leveldb_delete.restype = None
_ldb.leveldb_write.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_write.restype = None
_ldb.leveldb_get.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_get.restype = ctypes.POINTER(ctypes.c_char)
_ldb.leveldb_writeoptions_create.argtypes = []
_ldb.leveldb_writeoptions_create.restype = ctypes.c_void_p
_ldb.leveldb_writeoptions_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_writeoptions_destroy.restype = None
_ldb.leveldb_writeoptions_set_sync.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_writeoptions_set_sync.restype = None
_ldb.leveldb_readoptions_create.argtypes = []
_ldb.leveldb_readoptions_create.restype = ctypes.c_void_p
_ldb.leveldb_readoptions_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_readoptions_destroy.restype = None
_ldb.leveldb_readoptions_set_verify_checksums.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_readoptions_set_verify_checksums.restype = None
_ldb.leveldb_readoptions_set_fill_cache.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_readoptions_set_fill_cache.restype = None
_ldb.leveldb_readoptions_set_snapshot.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
_ldb.leveldb_readoptions_set_snapshot.restype = None
_ldb.leveldb_create_iterator.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_create_iterator.restype = ctypes.c_void_p
_ldb.leveldb_iter_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_destroy.restype = None
_ldb.leveldb_iter_valid.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_valid.restype = ctypes.c_bool
_ldb.leveldb_iter_key.argtypes = [ctypes.c_void_p,
ctypes.POINTER(ctypes.c_size_t)]
_ldb.leveldb_iter_key.restype = ctypes.c_void_p
_ldb.leveldb_iter_value.argtypes = [ctypes.c_void_p,
ctypes.POINTER(ctypes.c_size_t)]
_ldb.leveldb_iter_value.restype = ctypes.c_void_p
_ldb.leveldb_iter_next.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_next.restype = None
_ldb.leveldb_iter_prev.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_prev.restype = None
_ldb.leveldb_iter_seek_to_first.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_seek_to_first.restype = None
_ldb.leveldb_iter_seek_to_last.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_seek_to_last.restype = None
_ldb.leveldb_iter_seek.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t]
_ldb.leveldb_iter_seek.restype = None
_ldb.leveldb_iter_get_error.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_iter_get_error.restype = None
_ldb.leveldb_writebatch_create.argtypes = []
_ldb.leveldb_writebatch_create.restype = ctypes.c_void_p
_ldb.leveldb_writebatch_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_writebatch_destroy.restype = None
_ldb.leveldb_writebatch_clear.argtypes = [ctypes.c_void_p]
_ldb.leveldb_writebatch_clear.restype = None
_ldb.leveldb_writebatch_put.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t]
_ldb.leveldb_writebatch_put.restype = None
_ldb.leveldb_writebatch_delete.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t]
_ldb.leveldb_writebatch_delete.restype = None
_ldb.leveldb_approximate_sizes.argtypes = [ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p]
_ldb.leveldb_approximate_sizes.restype = None
_ldb.leveldb_compact_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t]
_ldb.leveldb_compact_range.restype = None
_ldb.leveldb_create_snapshot.argtypes = [ctypes.c_void_p]
_ldb.leveldb_create_snapshot.restype = ctypes.c_void_p
_ldb.leveldb_release_snapshot.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_release_snapshot.restype = None
_ldb.leveldb_free.argtypes = [ctypes.c_void_p]
_ldb.leveldb_free.restype = None
Row = namedtuple('Row', 'key value')
def Options():
pass
def WriteOptions():
pass
def ReadOptions():
pass
class Error(Exception):
pass
class ZipCompressionError(Exception):
pass
class Iterator(object):
"""This class is created by calling __iter__ or iterator on a DB interface
"""
__slots__ = ["_prefix", "_impl", "_keys_only"]
def __init__(self, impl, keys_only=False, prefix=None):
self._impl = impl
self._prefix = prefix
self._keys_only = keys_only
def status(self):
pass
Status = status
def Valid(self):
"""Returns whether the iterator is valid or not
@rtype: bool
"""
valid = self._impl.Valid()
if not valid or self._prefix is None:
return valid
key = self._impl.key()
return key[:len(self._prefix)] == self._prefix
def SeekToFirst(self):
"""
Jump to first key in database
@return: self
@rtype: Iter
"""
if self._prefix is not None:
self._impl.seek(self._prefix)
else:
self._impl.SeekToFirst()
return self
def SeekToLast(self):
"""
Jump to last key in database
@return: self
@rtype: Iter
"""
# if we have no prefix or the last possible prefix of this length, just
# seek to the last key in the db.
if self._prefix is None or self._prefix == "\xff" * len(self._prefix):
self._impl.SeekToLast()
return self
# we have a prefix. see if there's anything after our prefix.
# there's probably a much better way to calculate the Next prefix.
hex_prefix = self._prefix.encode('hex')
Next_prefix = hex(long(hex_prefix, 16) + 1)[2:].rstrip("L")
Next_prefix = Next_prefix.rjust(len(hex_prefix), "0")
Next_prefix = Next_prefix.decode("hex").rstrip("\x00")
self._impl.seek(Next_prefix)
if self._impl.Valid():
# there is something after our prefix. we're on it, so step back
self._impl.Prev()
else:
# there is nothing after our prefix, just seek to the last key
self._impl.SeekToLast()
return self
def seek(self, key):
"""Move the iterator to key. This may be called after StopIteration,
allowing you to reuse an iterator safely.
@param key: Where to position the iterator.
@type key: str
@return: self
@rtype: Iter
"""
if self._prefix is not None:
key = self._prefix + key
self._impl.seek(key)
return self
Seek = seek
def key(self):
"""Returns the iterator's current key. You should be sure the iterator
is currently valid first by calling valid()
@rtype: string
"""
key = self._impl.key()
if self._prefix is not None:
return key[len(self._prefix):]
return key
Key = key
def value(self):
"""Returns the iterator's current value. You should be sure the
iterator is currently valid first by calling valid()
@rtype: string
"""
return self._impl.val()
Value = value
def __iter__(self):
return self
def Next(self):
"""Advances the iterator one step. Also returns the current value prior
to moving the iterator
@rtype: Row (namedtuple of key, value) if keys_only=False, otherwise
string (the key)
@raise StopIteration: if called on an iterator that is not valid
"""
if not self.Valid():
raise StopIteration()
if self._keys_only:
rv = self.key()
else:
rv = Row(self.key(), self.value())
self._impl.Next()
return rv
next = Next
def Prev(self):
"""Backs the iterator up one step. Also returns the current value prior
to moving the iterator.
@rtype: Row (namedtuple of key, value) if keys_only=False, otherwise
string (the key)
@raise StopIteration: if called on an iterator that is not valid
"""
if not self.Valid():
raise StopIteration()
if self._keys_only:
rv = self.key()
else:
rv = Row(self.key(), self.value())
self._impl.Prev()
return rv
def stepForward(self):
"""Same as Next but does not return any data or check for validity"""
self._impl.Next()
StepForward = stepForward
def stepBackward(self):
"""Same as Prev but does not return any data or check for validity"""
self._impl.Prev()
StepBackward = stepBackward
def range(self, start_key=None, end_key=None, start_inclusive=True,
end_inclusive=False):
"""A generator for some range of rows"""
if start_key is not None:
self.seek(start_key)
if not start_inclusive and self.key() == start_key:
self._impl.Next()
else:
self.SeekToFirst()
for row in self:
if end_key is not None and (row.key > end_key or (
not end_inclusive and row.key == end_key)):
break
yield row
Range = range
def keys(self):
while self.Valid():
yield self.key()
self.stepForward()
Keys = keys
def values(self):
while self.Valid():
yield self.value()
self.stepForward()
Values = values
def close(self):
self._impl.close()
Close = close
class _OpaqueWriteBatch(object):
"""This is an opaque write batch that must be written to using the putTo
and deleteFrom methods on DBInterface.
"""
def __init__(self):
self._puts = {}
self._deletes = set()
self._private = True
def clear(self):
self._puts = {}
self._deletes = set()
Clear = clear
class WriteBatch(_OpaqueWriteBatch):
"""This class is created stand-alone, but then written to some existing
DBInterface
"""
def __init__(self):
_OpaqueWriteBatch.__init__(self)
self._private = False
def put(self, key, val):
self._deletes.discard(key)
self._puts[key] = val
Put = put
def delete(self, key):
self._puts.pop(key, None)
self._deletes.add(key)
Delete = delete
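# Editor's note (illustrative addition, not part of the original module): a
# hedged sketch of how a caller might combine the stand-alone WriteBatch above
# with DBInterface.write defined below. The `db` argument and the keys/values
# are hypothetical placeholders.
def _example_writebatch_usage(db):
    """Build a WriteBatch and apply it atomically through a DBInterface."""
    batch = WriteBatch()
    batch.put("player:1", "steve")      # queue an insert/update
    batch.put("player:2", "alex")
    batch.delete("player:0")            # queue a delete
    db.write(None, batch)               # apply all queued operations at once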
class DBInterface(object):
"""This class is created through a few different means:
Initially, it can be created using either the DB() or MemoryDB()
module-level methods. In almost every case, you want the DB() method.
You can then get new DBInterfaces from an existing DBInterface by calling
snapshot or scope.
"""
__slots__ = ["_impl", "_prefix", "_allow_close", "_default_sync",
"_default_verify_checksums", "_default_fill_cache"]
def __init__(self, impl, prefix=None, allow_close=False,
default_sync=False, default_verify_checksums=False,
default_fill_cache=True):
self._impl = impl
self._prefix = prefix
self._allow_close = allow_close
self._default_sync = default_sync
self._default_verify_checksums = default_verify_checksums
self._default_fill_cache = default_fill_cache
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._allow_close:
self._impl.close()
Close = close
@staticmethod
def newBatch():
return _OpaqueWriteBatch()
NewBatch = newBatch
def put(self, options, key, val, sync=None):
if sync is None:
sync = self._default_sync
if self._prefix is not None:
key = self._prefix + key
self._impl.put(options, key, val, sync=sync)
Put = put
# pylint: disable=W0212
def putTo(self, batch, key, val):
if not batch._private:
raise ValueError("batch not from DBInterface.newBatch")
if self._prefix is not None:
key = self._prefix + key
batch._deletes.discard(key)
batch._puts[key] = val
PutTo = putTo
def delete(self, key, sync=None):
if sync is None:
sync = self._default_sync
if self._prefix is not None:
key = self._prefix + key
self._impl.delete(key, sync=sync)
Delete = delete
# pylint: disable=W0212
def deleteFrom(self, batch, key):
if not batch._private:
raise ValueError("batch not from DBInterface.newBatch")
if self._prefix is not None:
key = self._prefix + key
batch._puts.pop(key, None)
batch._deletes.add(key)
DeleteFrom = deleteFrom
def Get(self, options, key, verify_checksums=None, fill_cache=None):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
if self._prefix is not None:
key = self._prefix + key
return self._impl.Get(None, key, verify_checksums=verify_checksums,
fill_cache=fill_cache)
# pylint: disable=W0212
def write(self, options, batch, sync=None):
if sync is None:
sync = self._default_sync
if self._prefix is not None and not batch._private:
unscoped_batch = _OpaqueWriteBatch()
for key, value in batch._puts.iteritems():
unscoped_batch._puts[self._prefix + key] = value
for key in batch._deletes:
unscoped_batch._deletes.add(self._prefix + key)
batch = unscoped_batch
return self._impl.write(options, batch, sync=sync)
Write = write
def NewIterator(self, options=None, verify_checksums=None, fill_cache=None, prefix=None,
keys_only=False):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
if self._prefix is not None:
if prefix is None:
prefix = self._prefix
else:
prefix = self._prefix + prefix
return Iterator(
self._impl.NewIterator(verify_checksums=verify_checksums,
fill_cache=fill_cache),
keys_only=keys_only, prefix=prefix)
def snapshot(self, default_sync=None, default_verify_checksums=None,
default_fill_cache=None):
if default_sync is None:
default_sync = self._default_sync
if default_verify_checksums is None:
default_verify_checksums = self._default_verify_checksums
if default_fill_cache is None:
default_fill_cache = self._default_fill_cache
return DBInterface(self._impl.snapshot(), prefix=self._prefix,
allow_close=False, default_sync=default_sync,
default_verify_checksums=default_verify_checksums,
default_fill_cache=default_fill_cache)
Snapshot = snapshot
def __iter__(self):
return self.NewIterator().SeekToFirst()
def __getitem__(self, k):
v = self.Get(None, k)
if v is None:
raise KeyError(k)
return v
def __setitem__(self, k, v):
self.put(None, k, v)
def __delitem__(self, k):
self.delete(k)
def __contains__(self, key):
return self.has(key)
def has(self, key, verify_checksums=None, fill_cache=None):
return self.Get(None, key, verify_checksums=verify_checksums,
fill_cache=fill_cache) is not None
Has = has
def scope(self, prefix, default_sync=None, default_verify_checksums=None,
default_fill_cache=None):
if default_sync is None:
default_sync = self._default_sync
if default_verify_checksums is None:
default_verify_checksums = self._default_verify_checksums
if default_fill_cache is None:
default_fill_cache = self._default_fill_cache
if self._prefix is not None:
prefix = self._prefix + prefix
return DBInterface(self._impl, prefix=prefix, allow_close=False,
default_sync=default_sync,
default_verify_checksums=default_verify_checksums,
default_fill_cache=default_fill_cache)
Scope = scope
def range(self, start_key=None, end_key=None, start_inclusive=True,
end_inclusive=False, verify_checksums=None, fill_cache=None):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
return self.NewIterator(verify_checksums=verify_checksums,
fill_cache=fill_cache).range(start_key=start_key,
end_key=end_key, start_inclusive=start_inclusive,
end_inclusive=end_inclusive)
Range = range
def keys(self, verify_checksums=None, fill_cache=None, prefix=None):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
return self.NewIterator(verify_checksums=verify_checksums,
fill_cache=fill_cache, prefix=prefix).SeekToFirst().keys()
Keys = keys
def values(self, verify_checksums=None, fill_cache=None, prefix=None):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
return self.NewIterator(verify_checksums=verify_checksums,
fill_cache=fill_cache, prefix=prefix).SeekToFirst().values()
Values = values
def approximateDiskSizes(self, *ranges):
return self._impl.approximateDiskSizes(*ranges)
ApproximateDiskSizes = approximateDiskSizes
def compactRange(self, start_key, end_key):
return self._impl.compactRange(start_key, end_key)
CompactRange = compactRange
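# Editor's note (illustrative addition): the DBInterface docstring above says
# new interfaces are obtained via scope() and snapshot(). The sketch below is a
# hedged example of that pattern; the `db` argument and the key names are made
# up and nothing in the module calls this helper.
def _example_scope_and_snapshot(db):
    """Demonstrate prefixed scoping and read-only snapshots of a DBInterface."""
    users = db.scope("user:")           # every key is transparently prefixed
    users.put(None, "42", "jt")         # physically stored as "user:42"
    snap = db.snapshot()                # frozen, read-only view of the data
    users.put(None, "42", "someone else")
    return snap.Get(None, "user:42")    # still sees the value at snapshot time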
def MemoryDB(*_args, **kwargs):
"""This is primarily for unit testing. If you are doing anything serious,
you definitely are more interested in the standard DB class.
Arguments are ignored.
TODO: if the LevelDB C api ever allows for other environments, actually
use LevelDB code for this, instead of reimplementing it all in
Python.
"""
assert kwargs.get("create_if_missing", True)
return DBInterface(_MemoryDBImpl(), allow_close=True)
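# Editor's note (illustrative addition): a minimal, hedged sketch of how the
# in-memory backend above can be exercised in a unit test. Keys and values are
# arbitrary; this helper is never invoked by the module itself.
def _example_memorydb_usage():
    """Exercise put/get/delete against the pure-Python in-memory backend."""
    db = MemoryDB(create_if_missing=True)
    db.put(None, "alpha", "1")
    db.put(None, "beta", "2")
    assert db.Get(None, "alpha") == "1"
    db.delete("beta")
    assert "beta" not in db              # __contains__ goes through Get
    db.close()
    return True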
class _IteratorMemImpl(object):
__slots__ = ["_data", "_idx"]
def __init__(self, memdb_data):
self._data = memdb_data
self._idx = -1
def Valid(self):
return 0 <= self._idx < len(self._data)
def key(self):
return self._data[self._idx][0]
Key = key
def val(self):
return self._data[self._idx][1]
Val = val
def seek(self, key):
self._idx = bisect.bisect_left(self._data, (key, ""))
Seek = seek
def SeekToFirst(self):
self._idx = 0
def SeekToLast(self):
self._idx = len(self._data) - 1
def Prev(self):
self._idx -= 1
def Next(self):
self._idx += 1
def close(self):
self._data = []
self._idx = -1
Close = close
class _MemoryDBImpl(object):
__slots__ = ["_data", "_lock", "_is_snapshot"]
def __init__(self, data=None, is_snapshot=False):
if data is None:
self._data = []
else:
self._data = data
self._lock = threading.RLock()
self._is_snapshot = is_snapshot
def close(self):
with self._lock:
self._data = []
Close = close
def put(self, options, key, val, **_kwargs):
if self._is_snapshot:
raise TypeError("cannot put on leveldb snapshot")
assert isinstance(key, str)
assert isinstance(val, str)
with self._lock:
idx = bisect.bisect_left(self._data, (key, ""))
if 0 <= idx < len(self._data) and self._data[idx][0] == key:
self._data[idx] = (key, val)
else:
self._data.insert(idx, (key, val))
Put = put
def delete(self, key, **_kwargs):
if self._is_snapshot:
raise TypeError("cannot delete on leveldb snapshot")
with self._lock:
idx = bisect.bisect_left(self._data, (key, ""))
if 0 <= idx < len(self._data) and self._data[idx][0] == key:
del self._data[idx]
Delete = delete
def Get(self, options, key, **_kwargs):
with self._lock:
idx = bisect.bisect_left(self._data, (key, ""))
if 0 <= idx < len(self._data) and self._data[idx][0] == key:
return self._data[idx][1]
return None
# pylint: disable=W0212
def write(self, options, batch, **_kwargs):
if self._is_snapshot:
raise TypeError("cannot write on leveldb snapshot")
with self._lock:
for key, val in batch._puts.iteritems():
self.put(options, key, val)
for key in batch._deletes:
self.delete(key)
Write = write
def NewIterator(self, **_kwargs):
# WARNING: huge performance hit.
# leveldb iterators are actually lightweight snapshots of the data. in
# real leveldb, an iterator won't change its idea of the full database
# even if puts or deletes happen while the iterator is in use. to
# simulate this, there isn't anything simple we can do for now besides
# just copy the whole thing.
with self._lock:
return _IteratorMemImpl(self._data[:])
def approximateDiskSizes(self, *ranges):
if self._is_snapshot:
raise TypeError("cannot calculate disk sizes on leveldb snapshot")
return [0] * len(ranges)
ApproximateDiskSizes = approximateDiskSizes
def compactRange(self, start_key, end_key):
pass
CompactRange = compactRange
def snapshot(self):
if self._is_snapshot:
return self
with self._lock:
return _MemoryDBImpl(data=self._data[:], is_snapshot=True)
Snapshot = snapshot
class _PointerRef(object):
__slots__ = ["ref", "_close", "_referrers", "__weakref__"]
def __init__(self, ref, close_cb):
self.ref = ref
self._close = close_cb
self._referrers = weakref.WeakValueDictionary()
def addReferrer(self, referrer):
self._referrers[id(referrer)] = referrer
AddReferrer = addReferrer
def close(self):
ref, self.ref = self.ref, None
close, self._close = self._close, None
referrers = self._referrers
self._referrers = weakref.WeakValueDictionary()
for referrer in referrers.valuerefs():
referrer = referrer()
if referrer is not None:
referrer.close()
if ref is not None and close is not None:
close(ref)
Close = close
__del__ = close
def _checkError(error):
if bool(error):
message = ctypes.string_at(error)
_ldb.leveldb_free(ctypes.cast(error, ctypes.c_void_p))
_err = Error
if 'corrupted compressed block contents' in message:
_err = ZipCompressionError
raise _err(message)
class _IteratorDbImpl(object):
__slots__ = ["_ref"]
def __init__(self, iterator_ref):
self._ref = iterator_ref
def Valid(self):
return _ldb.leveldb_iter_valid(self._ref.ref)
def key(self):
length = ctypes.c_size_t(0)
val_p = _ldb.leveldb_iter_key(self._ref.ref, ctypes.byref(length))
assert bool(val_p)
return ctypes.string_at(val_p, length.value)
Key = key
def val(self):
length = ctypes.c_size_t(0)
val_p = _ldb.leveldb_iter_value(self._ref.ref, ctypes.byref(length))
assert bool(val_p)
return ctypes.string_at(val_p, length.value)
Val = val
def seek(self, key):
_ldb.leveldb_iter_seek(self._ref.ref, key, len(key))
self._checkError()
Seek = seek
def SeekToFirst(self):
_ldb.leveldb_iter_seek_to_first(self._ref.ref)
self._checkError()
def SeekToLast(self):
_ldb.leveldb_iter_seek_to_last(self._ref.ref)
self._checkError()
def Prev(self):
_ldb.leveldb_iter_prev(self._ref.ref)
self._checkError()
def Next(self):
_ldb.leveldb_iter_next(self._ref.ref)
self._checkError()
def _checkError(self):
error = ctypes.POINTER(ctypes.c_char)()
_ldb.leveldb_iter_get_error(self._ref.ref, ctypes.byref(error))
_checkError(error)
def close(self):
self._ref.close()
Close = close
def DB(options_, path, bloom_filter_size=10, create_if_missing=False,
error_if_exists=False, paranoid_checks=False,
write_buffer_size=(4 * 1024 * 1024), max_open_files=1000,
block_cache_size=(8 * 1024 * 1024), block_size=163840,
default_sync=False, default_verify_checksums=False,
default_fill_cache=True, compressors=(2,)):
"""This is the expected way to open a database. Returns a DBInterface.
"""
filter_policy = _PointerRef(
_ldb.leveldb_filterpolicy_create_bloom(bloom_filter_size),
_ldb.leveldb_filterpolicy_destroy)
cache = _PointerRef(
_ldb.leveldb_cache_create_lru(block_cache_size),
_ldb.leveldb_cache_destroy)
global options
options = _ldb.leveldb_options_create()
# Handling the dual compression in PE 1.2+
    # Since the code on Mojang's side is not compatible with this for now,
    # fall back to the prior behaviour and call leveldb_options_set_compression
    # with the first element in 'compressors'.
if hasattr(_ldb, 'leveldb_options_set_compressor'):
log.debug("Found 'leveldb_options_set_compressors' in _ldb")
if isinstance(compressors, int):
# Old behaviour, only one compressor
_ldb.leveldb_options_set_compression(options, compressors)
elif isinstance(compressors, (list, tuple)):
            # Here we need more than one compressor
for i, compr in enumerate(compressors):
if isinstance(compr, int):
_ldb.leveldb_options_set_compressor(options, i, compr)
else:
raise TypeError("Wrong type for compressor #%s: int wanted, %s found (%s)." % (i, type(compr), compr))
else:
_ldb.leveldb_options_set_compression(options, compressors[0])
_ldb.leveldb_options_set_filter_policy(
options, filter_policy.ref)
_ldb.leveldb_options_set_create_if_missing(options, create_if_missing)
_ldb.leveldb_options_set_error_if_exists(options, error_if_exists)
_ldb.leveldb_options_set_paranoid_checks(options, paranoid_checks)
_ldb.leveldb_options_set_write_buffer_size(options, write_buffer_size)
_ldb.leveldb_options_set_max_open_files(options, max_open_files)
_ldb.leveldb_options_set_cache(options, cache.ref)
_ldb.leveldb_options_set_block_size(options, block_size)
error = ctypes.POINTER(ctypes.c_char)()
db = _ldb.leveldb_open(options, path, ctypes.byref(error))
_ldb.leveldb_options_destroy(options)
_checkError(error)
db = _PointerRef(db, _ldb.leveldb_close)
filter_policy.addReferrer(db)
cache.addReferrer(db)
return DBInterface(_LevelDBImpl(db, other_objects=(filter_policy, cache)),
allow_close=True, default_sync=default_sync,
default_verify_checksums=default_verify_checksums,
default_fill_cache=default_fill_cache)
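# Editor's note (illustrative addition): a hedged sketch of opening an on-disk
# database with the DB() factory above and streaming a key range back out. The
# path, keys and values are hypothetical and this helper is not executed by
# MCEdit itself.
def _example_open_world_db(path="/tmp/example_leveldb"):
    """Open (or create) a database and iterate a key range out of it."""
    db = DB(None, path, create_if_missing=True)
    try:
        db.put(None, "chunk:0:0", "\x00" * 16)
        for row in db.range(start_key="chunk:", end_key="chunk;"):
            # row is a Row(key, value) namedtuple produced by the Iterator
            print row.key, len(row.value)
    finally:
        db.close()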
class _LevelDBImpl(object):
__slots__ = ["_objs", "_db", "_snapshot"]
def __init__(self, db_ref, snapshot_ref=None, other_objects=()):
self._objs = other_objects
self._db = db_ref
self._snapshot = snapshot_ref
def close(self):
db, self._db = self._db, None
objs, self._objs = self._objs, ()
if db is not None:
db.close()
for obj in objs:
obj.close()
Close = close
def put(self, options, key, val, sync=False):
if self._snapshot is not None:
raise TypeError("cannot put on leveldb snapshot")
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_writeoptions_create()
_ldb.leveldb_writeoptions_set_sync(options, sync)
_ldb.leveldb_put(self._db.ref, options, key, len(key), val, len(val),
ctypes.byref(error))
_ldb.leveldb_writeoptions_destroy(options)
_checkError(error)
Put = put
def delete(self, key, sync=False):
if self._snapshot is not None:
raise TypeError("cannot delete on leveldb snapshot")
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_writeoptions_create()
_ldb.leveldb_writeoptions_set_sync(options, sync)
_ldb.leveldb_delete(self._db.ref, options, key, len(key),
ctypes.byref(error))
_ldb.leveldb_writeoptions_destroy(options)
_checkError(error)
Delete = delete
def Get(self, options, key, verify_checksums=False, fill_cache=True):
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_readoptions_create()
_ldb.leveldb_readoptions_set_verify_checksums(options,
verify_checksums)
_ldb.leveldb_readoptions_set_fill_cache(options, fill_cache)
if self._snapshot is not None:
_ldb.leveldb_readoptions_set_snapshot(options, self._snapshot.ref)
size = ctypes.c_size_t(0)
val_p = _ldb.leveldb_get(self._db.ref, options, key, len(key),
ctypes.byref(size), ctypes.byref(error))
if bool(val_p):
val = ctypes.string_at(val_p, size.value)
_ldb.leveldb_free(ctypes.cast(val_p, ctypes.c_void_p))
else:
val = None
_ldb.leveldb_readoptions_destroy(options)
_checkError(error)
return val
# pylint: disable=W0212
def write(self, options, batch, sync=False):
if self._snapshot is not None:
raise TypeError("cannot delete on leveldb snapshot")
real_batch = _ldb.leveldb_writebatch_create()
for key, val in batch._puts.iteritems():
_ldb.leveldb_writebatch_put(real_batch, key, len(key), val,
len(val))
for key in batch._deletes:
_ldb.leveldb_writebatch_delete(real_batch, key, len(key))
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_writeoptions_create()
_ldb.leveldb_writeoptions_set_sync(options, sync)
_ldb.leveldb_write(self._db.ref, options, real_batch,
ctypes.byref(error))
_ldb.leveldb_writeoptions_destroy(options)
_ldb.leveldb_writebatch_destroy(real_batch)
_checkError(error)
Write = write
def NewIterator(self, options=None, verify_checksums=False, fill_cache=True):
options = _ldb.leveldb_readoptions_create()
if self._snapshot is not None:
_ldb.leveldb_readoptions_set_snapshot(options, self._snapshot.ref)
_ldb.leveldb_readoptions_set_verify_checksums(
options, verify_checksums)
_ldb.leveldb_readoptions_set_fill_cache(options, fill_cache)
it_ref = _PointerRef(
_ldb.leveldb_create_iterator(self._db.ref, options),
_ldb.leveldb_iter_destroy)
_ldb.leveldb_readoptions_destroy(options)
self._db.addReferrer(it_ref)
return _IteratorDbImpl(it_ref)
def approximateDiskSizes(self, *ranges):
if self._snapshot is not None:
raise TypeError("cannot calculate disk sizes on leveldb snapshot")
assert len(ranges) > 0
key_type = ctypes.c_void_p * len(ranges)
len_type = ctypes.c_size_t * len(ranges)
start_keys, start_lens = key_type(), len_type()
end_keys, end_lens = key_type(), len_type()
sizes = (ctypes.c_uint64 * len(ranges))()
for i, range_ in enumerate(ranges):
assert isinstance(range_, tuple) and len(range_) == 2
assert isinstance(range_[0], str) and isinstance(range_[1], str)
start_keys[i] = ctypes.cast(range_[0], ctypes.c_void_p)
end_keys[i] = ctypes.cast(range_[1], ctypes.c_void_p)
start_lens[i], end_lens[i] = len(range_[0]), len(range_[1])
_ldb.leveldb_approximate_sizes(self._db.ref, len(ranges), start_keys,
start_lens, end_keys, end_lens, sizes)
return list(sizes)
ApproximateDiskSizes = approximateDiskSizes
def compactRange(self, start_key, end_key):
assert isinstance(start_key, str) and isinstance(end_key, str)
_ldb.leveldb_compact_range(self._db.ref, start_key, len(start_key),
end_key, len(end_key))
CompactRange = compactRange
def snapshot(self):
snapshot_ref = _PointerRef(
_ldb.leveldb_create_snapshot(self._db.ref),
lambda ref: _ldb.leveldb_release_snapshot(self._db.ref, ref))
self._db.addReferrer(snapshot_ref)
return _LevelDBImpl(self._db, snapshot_ref=snapshot_ref,
other_objects=self._objs)
Snapshot = snapshot
log.debug("MCEdit-Unified internal PE 1+ support initialized.")
| 36.075251
| 137
| 0.644301
|
__author__ = "JT Olds"
__email__ = "jt@spacemonkey.com"
import bisect
import ctypes
import ctypes.util
import weakref
import threading
from collections import namedtuple
import os
import sys
import platform
import directories
import logging
log = logging.getLogger(__name__)
try:
plat = sys.platform
if plat == 'linux2':
if getattr(sys, 'frozen', False):
searched = []
p = os.path.dirname(os.path.abspath(__file__))
            # When running from a bundle, the .so shall be in '<program install directory>/<last part of p>'
b_dir, so_dir = os.path.split(p)
b_dir = os.path.split(b_dir)[0]
pth = None
while pth is None and b_dir != '/':
_p = os.path.join(b_dir, so_dir)
if os.path.exists(os.path.join(_p, 'libleveldb.so')):
pth = _p
else:
searched.append(_p)
b_dir = os.path.split(b_dir)[0]
if pth is None:
raise IOError("File 'libleveldb.so' not found in any of these places:\n%s" % '\n'.join(searched))
else:
log.info("Found 'libleveldb.so' in %s"%pth)
else:
pth = os.path.dirname(os.path.abspath(__file__))
_ldb = ctypes.CDLL(os.path.join(pth, 'libleveldb.so'))
elif plat == 'darwin':
        # since on OSX the program is bundled in a .app archive, shall we use the same (or a similar) technique as for Linux?
_ldb = ctypes.CDLL(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libleveldb.dylib'))
elif plat == 'win32':
if getattr(sys, '_MEIPASS', False):
import win32api
win32api.SetDllDirectory(sys._MEIPASS)
DLL_NAME = 'LevelDB-MCPE-32bit.dll'
if platform.architecture()[0] == '64bit' or sys.maxsize > 2**32:
DLL_NAME = 'LevelDB-MCPE-64bit.dll'
#_ldb = ctypes.CDLL(os.path.join(os.path.dirname(os.path.abspath(__file__)), "LevelDB-MCPE.dll"))
_ldb = ctypes.CDLL(str(directories.getDataFile('pymclevel', DLL_NAME)))
log.debug("Binary support v%s.%s for PE 1+ world succesfully loaded." % (_ldb.leveldb_major_version(), _ldb.leveldb_minor_version()))
except Exception as e:
    # What shall we do if the library is not found?
    # If the library is not loaded, the _ldb object does not exist, and every call to it will crash MCEdit...
    # We may import this module using a try/except statement.
log.error("The binary support for PE 1+ worlds could not be loaded:")
log.error(e)
raise e
_ldb.leveldb_filterpolicy_create_bloom.argtypes = [ctypes.c_int]
_ldb.leveldb_filterpolicy_create_bloom.restype = ctypes.c_void_p
_ldb.leveldb_filterpolicy_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_filterpolicy_destroy.restype = None
_ldb.leveldb_cache_create_lru.argtypes = [ctypes.c_size_t]
_ldb.leveldb_cache_create_lru.restype = ctypes.c_void_p
_ldb.leveldb_cache_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_cache_destroy.restype = None
_ldb.leveldb_options_create.argtypes = []
_ldb.leveldb_options_create.restype = ctypes.c_void_p
_ldb.leveldb_options_set_filter_policy.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
_ldb.leveldb_options_set_filter_policy.restype = None
_ldb.leveldb_options_set_create_if_missing.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_options_set_create_if_missing.restype = None
_ldb.leveldb_options_set_error_if_exists.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_options_set_error_if_exists.restype = None
_ldb.leveldb_options_set_paranoid_checks.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_options_set_paranoid_checks.restype = None
_ldb.leveldb_options_set_write_buffer_size.argtypes = [ctypes.c_void_p,
ctypes.c_size_t]
_ldb.leveldb_options_set_write_buffer_size.restype = None
_ldb.leveldb_options_set_max_open_files.argtypes = [ctypes.c_void_p,
ctypes.c_int]
_ldb.leveldb_options_set_max_open_files.restype = None
_ldb.leveldb_options_set_cache.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_options_set_cache.restype = None
_ldb.leveldb_options_set_block_size.argtypes = [ctypes.c_void_p,
ctypes.c_size_t]
_ldb.leveldb_options_set_block_size.restype = None
_ldb.leveldb_options_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_options_destroy.restype = None
_ldb.leveldb_options_set_compression.argtypes = [ctypes.c_void_p, ctypes.c_int]
_ldb.leveldb_options_set_compression.restype = None
try:
# options obj, index, compressor obj, error checker pointer
_ldb.leveldb_options_set_compressor.argtypes = [ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int]
_ldb.leveldb_options_set_compressor.restype = None
except Exception as exc:
log.debug("ERROR: leveldb::Options.compressors interface could not be accessed:")
log.debug("%s" % exc)
_ldb.leveldb_open.argtypes = [ctypes.c_void_p, ctypes.c_char_p,
ctypes.c_void_p]
_ldb.leveldb_open.restype = ctypes.c_void_p
_ldb.leveldb_close.argtypes = [ctypes.c_void_p]
_ldb.leveldb_close.restype = None
_ldb.leveldb_put.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t,
ctypes.c_void_p]
_ldb.leveldb_put.restype = None
_ldb.leveldb_delete.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
_ldb.leveldb_delete.restype = None
_ldb.leveldb_write.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_write.restype = None
_ldb.leveldb_get.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_get.restype = ctypes.POINTER(ctypes.c_char)
_ldb.leveldb_writeoptions_create.argtypes = []
_ldb.leveldb_writeoptions_create.restype = ctypes.c_void_p
_ldb.leveldb_writeoptions_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_writeoptions_destroy.restype = None
_ldb.leveldb_writeoptions_set_sync.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_writeoptions_set_sync.restype = None
_ldb.leveldb_readoptions_create.argtypes = []
_ldb.leveldb_readoptions_create.restype = ctypes.c_void_p
_ldb.leveldb_readoptions_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_readoptions_destroy.restype = None
_ldb.leveldb_readoptions_set_verify_checksums.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_readoptions_set_verify_checksums.restype = None
_ldb.leveldb_readoptions_set_fill_cache.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
_ldb.leveldb_readoptions_set_fill_cache.restype = None
_ldb.leveldb_readoptions_set_snapshot.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
_ldb.leveldb_readoptions_set_snapshot.restype = None
_ldb.leveldb_create_iterator.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_create_iterator.restype = ctypes.c_void_p
_ldb.leveldb_iter_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_destroy.restype = None
_ldb.leveldb_iter_valid.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_valid.restype = ctypes.c_bool
_ldb.leveldb_iter_key.argtypes = [ctypes.c_void_p,
ctypes.POINTER(ctypes.c_size_t)]
_ldb.leveldb_iter_key.restype = ctypes.c_void_p
_ldb.leveldb_iter_value.argtypes = [ctypes.c_void_p,
ctypes.POINTER(ctypes.c_size_t)]
_ldb.leveldb_iter_value.restype = ctypes.c_void_p
_ldb.leveldb_iter_next.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_next.restype = None
_ldb.leveldb_iter_prev.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_prev.restype = None
_ldb.leveldb_iter_seek_to_first.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_seek_to_first.restype = None
_ldb.leveldb_iter_seek_to_last.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_seek_to_last.restype = None
_ldb.leveldb_iter_seek.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t]
_ldb.leveldb_iter_seek.restype = None
_ldb.leveldb_iter_get_error.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_iter_get_error.restype = None
_ldb.leveldb_writebatch_create.argtypes = []
_ldb.leveldb_writebatch_create.restype = ctypes.c_void_p
_ldb.leveldb_writebatch_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_writebatch_destroy.restype = None
_ldb.leveldb_writebatch_clear.argtypes = [ctypes.c_void_p]
_ldb.leveldb_writebatch_clear.restype = None
_ldb.leveldb_writebatch_put.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t]
_ldb.leveldb_writebatch_put.restype = None
_ldb.leveldb_writebatch_delete.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t]
_ldb.leveldb_writebatch_delete.restype = None
_ldb.leveldb_approximate_sizes.argtypes = [ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p]
_ldb.leveldb_approximate_sizes.restype = None
_ldb.leveldb_compact_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t]
_ldb.leveldb_compact_range.restype = None
_ldb.leveldb_create_snapshot.argtypes = [ctypes.c_void_p]
_ldb.leveldb_create_snapshot.restype = ctypes.c_void_p
_ldb.leveldb_release_snapshot.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_release_snapshot.restype = None
_ldb.leveldb_free.argtypes = [ctypes.c_void_p]
_ldb.leveldb_free.restype = None
Row = namedtuple('Row', 'key value')
def Options():
pass
def WriteOptions():
pass
def ReadOptions():
pass
class Error(Exception):
pass
class ZipCompressionError(Exception):
pass
class Iterator(object):
__slots__ = ["_prefix", "_impl", "_keys_only"]
def __init__(self, impl, keys_only=False, prefix=None):
self._impl = impl
self._prefix = prefix
self._keys_only = keys_only
def status(self):
pass
Status = status
def Valid(self):
valid = self._impl.Valid()
if not valid or self._prefix is None:
return valid
key = self._impl.key()
return key[:len(self._prefix)] == self._prefix
def SeekToFirst(self):
if self._prefix is not None:
self._impl.seek(self._prefix)
else:
self._impl.SeekToFirst()
return self
def SeekToLast(self):
# if we have no prefix or the last possible prefix of this length, just
# seek to the last key in the db.
if self._prefix is None or self._prefix == "\xff" * len(self._prefix):
self._impl.SeekToLast()
return self
# we have a prefix. see if there's anything after our prefix.
hex_prefix = self._prefix.encode('hex')
Next_prefix = hex(long(hex_prefix, 16) + 1)[2:].rstrip("L")
Next_prefix = Next_prefix.rjust(len(hex_prefix), "0")
Next_prefix = Next_prefix.decode("hex").rstrip("\x00")
self._impl.seek(Next_prefix)
if self._impl.Valid():
# there is something after our prefix. we're on it, so step back
self._impl.Prev()
else:
self._impl.SeekToLast()
return self
def seek(self, key):
if self._prefix is not None:
key = self._prefix + key
self._impl.seek(key)
return self
Seek = seek
def key(self):
key = self._impl.key()
if self._prefix is not None:
return key[len(self._prefix):]
return key
Key = key
def value(self):
return self._impl.val()
Value = value
def __iter__(self):
return self
def Next(self):
if not self.Valid():
raise StopIteration()
if self._keys_only:
rv = self.key()
else:
rv = Row(self.key(), self.value())
self._impl.Next()
return rv
next = Next
def Prev(self):
if not self.Valid():
raise StopIteration()
if self._keys_only:
rv = self.key()
else:
rv = Row(self.key(), self.value())
self._impl.Prev()
return rv
def stepForward(self):
self._impl.Next()
StepForward = stepForward
def stepBackward(self):
self._impl.Prev()
StepBackward = stepBackward
def range(self, start_key=None, end_key=None, start_inclusive=True,
end_inclusive=False):
if start_key is not None:
self.seek(start_key)
if not start_inclusive and self.key() == start_key:
self._impl.Next()
else:
self.SeekToFirst()
for row in self:
if end_key is not None and (row.key > end_key or (
not end_inclusive and row.key == end_key)):
break
yield row
Range = range
def keys(self):
while self.Valid():
yield self.key()
self.stepForward()
Keys = keys
def values(self):
while self.Valid():
yield self.value()
self.stepForward()
Values = values
def close(self):
self._impl.close()
Close = close
class _OpaqueWriteBatch(object):
def __init__(self):
self._puts = {}
self._deletes = set()
self._private = True
def clear(self):
self._puts = {}
self._deletes = set()
Clear = clear
class WriteBatch(_OpaqueWriteBatch):
def __init__(self):
_OpaqueWriteBatch.__init__(self)
self._private = False
def put(self, key, val):
self._deletes.discard(key)
self._puts[key] = val
Put = put
def delete(self, key):
self._puts.pop(key, None)
self._deletes.add(key)
Delete = delete
class DBInterface(object):
__slots__ = ["_impl", "_prefix", "_allow_close", "_default_sync",
"_default_verify_checksums", "_default_fill_cache"]
def __init__(self, impl, prefix=None, allow_close=False,
default_sync=False, default_verify_checksums=False,
default_fill_cache=True):
self._impl = impl
self._prefix = prefix
self._allow_close = allow_close
self._default_sync = default_sync
self._default_verify_checksums = default_verify_checksums
self._default_fill_cache = default_fill_cache
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._allow_close:
self._impl.close()
Close = close
@staticmethod
def newBatch():
return _OpaqueWriteBatch()
NewBatch = newBatch
def put(self, options, key, val, sync=None):
if sync is None:
sync = self._default_sync
if self._prefix is not None:
key = self._prefix + key
self._impl.put(options, key, val, sync=sync)
Put = put
def putTo(self, batch, key, val):
if not batch._private:
raise ValueError("batch not from DBInterface.newBatch")
if self._prefix is not None:
key = self._prefix + key
batch._deletes.discard(key)
batch._puts[key] = val
PutTo = putTo
def delete(self, key, sync=None):
if sync is None:
sync = self._default_sync
if self._prefix is not None:
key = self._prefix + key
self._impl.delete(key, sync=sync)
Delete = delete
def deleteFrom(self, batch, key):
if not batch._private:
raise ValueError("batch not from DBInterface.newBatch")
if self._prefix is not None:
key = self._prefix + key
batch._puts.pop(key, None)
batch._deletes.add(key)
DeleteFrom = deleteFrom
def Get(self, options, key, verify_checksums=None, fill_cache=None):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
if self._prefix is not None:
key = self._prefix + key
return self._impl.Get(None, key, verify_checksums=verify_checksums,
fill_cache=fill_cache)
def write(self, options, batch, sync=None):
if sync is None:
sync = self._default_sync
if self._prefix is not None and not batch._private:
unscoped_batch = _OpaqueWriteBatch()
for key, value in batch._puts.iteritems():
unscoped_batch._puts[self._prefix + key] = value
for key in batch._deletes:
unscoped_batch._deletes.add(self._prefix + key)
batch = unscoped_batch
return self._impl.write(options, batch, sync=sync)
Write = write
def NewIterator(self, options=None, verify_checksums=None, fill_cache=None, prefix=None,
keys_only=False):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
if self._prefix is not None:
if prefix is None:
prefix = self._prefix
else:
prefix = self._prefix + prefix
return Iterator(
self._impl.NewIterator(verify_checksums=verify_checksums,
fill_cache=fill_cache),
keys_only=keys_only, prefix=prefix)
def snapshot(self, default_sync=None, default_verify_checksums=None,
default_fill_cache=None):
if default_sync is None:
default_sync = self._default_sync
if default_verify_checksums is None:
default_verify_checksums = self._default_verify_checksums
if default_fill_cache is None:
default_fill_cache = self._default_fill_cache
return DBInterface(self._impl.snapshot(), prefix=self._prefix,
allow_close=False, default_sync=default_sync,
default_verify_checksums=default_verify_checksums,
default_fill_cache=default_fill_cache)
Snapshot = snapshot
def __iter__(self):
return self.NewIterator().SeekToFirst()
def __getitem__(self, k):
v = self.Get(None, k)
if v is None:
raise KeyError(k)
return v
def __setitem__(self, k, v):
self.put(None, k, v)
def __delitem__(self, k):
self.delete(k)
def __contains__(self, key):
return self.has(key)
def has(self, key, verify_checksums=None, fill_cache=None):
return self.Get(None, key, verify_checksums=verify_checksums,
fill_cache=fill_cache) is not None
Has = has
def scope(self, prefix, default_sync=None, default_verify_checksums=None,
default_fill_cache=None):
if default_sync is None:
default_sync = self._default_sync
if default_verify_checksums is None:
default_verify_checksums = self._default_verify_checksums
if default_fill_cache is None:
default_fill_cache = self._default_fill_cache
if self._prefix is not None:
prefix = self._prefix + prefix
return DBInterface(self._impl, prefix=prefix, allow_close=False,
default_sync=default_sync,
default_verify_checksums=default_verify_checksums,
default_fill_cache=default_fill_cache)
Scope = scope
def range(self, start_key=None, end_key=None, start_inclusive=True,
end_inclusive=False, verify_checksums=None, fill_cache=None):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
return self.NewIterator(verify_checksums=verify_checksums,
fill_cache=fill_cache).range(start_key=start_key,
end_key=end_key, start_inclusive=start_inclusive,
end_inclusive=end_inclusive)
Range = range
def keys(self, verify_checksums=None, fill_cache=None, prefix=None):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
return self.NewIterator(verify_checksums=verify_checksums,
fill_cache=fill_cache, prefix=prefix).SeekToFirst().keys()
Keys = keys
def values(self, verify_checksums=None, fill_cache=None, prefix=None):
if verify_checksums is None:
verify_checksums = self._default_verify_checksums
if fill_cache is None:
fill_cache = self._default_fill_cache
return self.NewIterator(verify_checksums=verify_checksums,
fill_cache=fill_cache, prefix=prefix).SeekToFirst().values()
Values = values
def approximateDiskSizes(self, *ranges):
return self._impl.approximateDiskSizes(*ranges)
ApproximateDiskSizes = approximateDiskSizes
def compactRange(self, start_key, end_key):
return self._impl.compactRange(start_key, end_key)
CompactRange = compactRange
def MemoryDB(*_args, **kwargs):
assert kwargs.get("create_if_missing", True)
return DBInterface(_MemoryDBImpl(), allow_close=True)
class _IteratorMemImpl(object):
__slots__ = ["_data", "_idx"]
def __init__(self, memdb_data):
self._data = memdb_data
self._idx = -1
def Valid(self):
return 0 <= self._idx < len(self._data)
def key(self):
return self._data[self._idx][0]
Key = key
def val(self):
return self._data[self._idx][1]
Val = val
def seek(self, key):
self._idx = bisect.bisect_left(self._data, (key, ""))
Seek = seek
def SeekToFirst(self):
self._idx = 0
def SeekToLast(self):
self._idx = len(self._data) - 1
def Prev(self):
self._idx -= 1
def Next(self):
self._idx += 1
def close(self):
self._data = []
self._idx = -1
Close = close
class _MemoryDBImpl(object):
__slots__ = ["_data", "_lock", "_is_snapshot"]
def __init__(self, data=None, is_snapshot=False):
if data is None:
self._data = []
else:
self._data = data
self._lock = threading.RLock()
self._is_snapshot = is_snapshot
def close(self):
with self._lock:
self._data = []
Close = close
def put(self, options, key, val, **_kwargs):
if self._is_snapshot:
raise TypeError("cannot put on leveldb snapshot")
assert isinstance(key, str)
assert isinstance(val, str)
with self._lock:
idx = bisect.bisect_left(self._data, (key, ""))
if 0 <= idx < len(self._data) and self._data[idx][0] == key:
self._data[idx] = (key, val)
else:
self._data.insert(idx, (key, val))
Put = put
def delete(self, key, **_kwargs):
if self._is_snapshot:
raise TypeError("cannot delete on leveldb snapshot")
with self._lock:
idx = bisect.bisect_left(self._data, (key, ""))
if 0 <= idx < len(self._data) and self._data[idx][0] == key:
del self._data[idx]
Delete = delete
def Get(self, options, key, **_kwargs):
with self._lock:
idx = bisect.bisect_left(self._data, (key, ""))
if 0 <= idx < len(self._data) and self._data[idx][0] == key:
return self._data[idx][1]
return None
def write(self, options, batch, **_kwargs):
if self._is_snapshot:
raise TypeError("cannot write on leveldb snapshot")
with self._lock:
for key, val in batch._puts.iteritems():
self.put(options, key, val)
for key in batch._deletes:
self.delete(key)
Write = write
def NewIterator(self, **_kwargs):
# even if puts or deletes happen while the iterator is in use. to
# simulate this, there isn't anything simple we can do for now besides
with self._lock:
return _IteratorMemImpl(self._data[:])
def approximateDiskSizes(self, *ranges):
if self._is_snapshot:
raise TypeError("cannot calculate disk sizes on leveldb snapshot")
return [0] * len(ranges)
ApproximateDiskSizes = approximateDiskSizes
def compactRange(self, start_key, end_key):
pass
CompactRange = compactRange
def snapshot(self):
if self._is_snapshot:
return self
with self._lock:
return _MemoryDBImpl(data=self._data[:], is_snapshot=True)
Snapshot = snapshot
class _PointerRef(object):
__slots__ = ["ref", "_close", "_referrers", "__weakref__"]
def __init__(self, ref, close_cb):
self.ref = ref
self._close = close_cb
self._referrers = weakref.WeakValueDictionary()
def addReferrer(self, referrer):
self._referrers[id(referrer)] = referrer
AddReferrer = addReferrer
def close(self):
ref, self.ref = self.ref, None
close, self._close = self._close, None
referrers = self._referrers
self._referrers = weakref.WeakValueDictionary()
for referrer in referrers.valuerefs():
referrer = referrer()
if referrer is not None:
referrer.close()
if ref is not None and close is not None:
close(ref)
Close = close
__del__ = close
def _checkError(error):
if bool(error):
message = ctypes.string_at(error)
_ldb.leveldb_free(ctypes.cast(error, ctypes.c_void_p))
_err = Error
if 'corrupted compressed block contents' in message:
_err = ZipCompressionError
raise _err(message)
class _IteratorDbImpl(object):
__slots__ = ["_ref"]
def __init__(self, iterator_ref):
self._ref = iterator_ref
def Valid(self):
return _ldb.leveldb_iter_valid(self._ref.ref)
def key(self):
length = ctypes.c_size_t(0)
val_p = _ldb.leveldb_iter_key(self._ref.ref, ctypes.byref(length))
assert bool(val_p)
return ctypes.string_at(val_p, length.value)
Key = key
def val(self):
length = ctypes.c_size_t(0)
val_p = _ldb.leveldb_iter_value(self._ref.ref, ctypes.byref(length))
assert bool(val_p)
return ctypes.string_at(val_p, length.value)
Val = val
def seek(self, key):
_ldb.leveldb_iter_seek(self._ref.ref, key, len(key))
self._checkError()
Seek = seek
def SeekToFirst(self):
_ldb.leveldb_iter_seek_to_first(self._ref.ref)
self._checkError()
def SeekToLast(self):
_ldb.leveldb_iter_seek_to_last(self._ref.ref)
self._checkError()
def Prev(self):
_ldb.leveldb_iter_prev(self._ref.ref)
self._checkError()
def Next(self):
_ldb.leveldb_iter_next(self._ref.ref)
self._checkError()
def _checkError(self):
error = ctypes.POINTER(ctypes.c_char)()
_ldb.leveldb_iter_get_error(self._ref.ref, ctypes.byref(error))
_checkError(error)
def close(self):
self._ref.close()
Close = close
def DB(options_, path, bloom_filter_size=10, create_if_missing=False,
error_if_exists=False, paranoid_checks=False,
write_buffer_size=(4 * 1024 * 1024), max_open_files=1000,
block_cache_size=(8 * 1024 * 1024), block_size=163840,
default_sync=False, default_verify_checksums=False,
default_fill_cache=True, compressors=(2,)):
filter_policy = _PointerRef(
_ldb.leveldb_filterpolicy_create_bloom(bloom_filter_size),
_ldb.leveldb_filterpolicy_destroy)
cache = _PointerRef(
_ldb.leveldb_cache_create_lru(block_cache_size),
_ldb.leveldb_cache_destroy)
global options
options = _ldb.leveldb_options_create()
    # fall back to the prior behaviour and call leveldb_options_set_compression
    # with the first element in 'compressors'.
if hasattr(_ldb, 'leveldb_options_set_compressor'):
log.debug("Found 'leveldb_options_set_compressors' in _ldb")
if isinstance(compressors, int):
# Old behaviour, only one compressor
_ldb.leveldb_options_set_compression(options, compressors)
elif isinstance(compressors, (list, tuple)):
            # Here we need more than one compressor
for i, compr in enumerate(compressors):
if isinstance(compr, int):
_ldb.leveldb_options_set_compressor(options, i, compr)
else:
raise TypeError("Wrong type for compressor #%s: int wanted, %s found (%s)." % (i, type(compr), compr))
else:
_ldb.leveldb_options_set_compression(options, compressors[0])
_ldb.leveldb_options_set_filter_policy(
options, filter_policy.ref)
_ldb.leveldb_options_set_create_if_missing(options, create_if_missing)
_ldb.leveldb_options_set_error_if_exists(options, error_if_exists)
_ldb.leveldb_options_set_paranoid_checks(options, paranoid_checks)
_ldb.leveldb_options_set_write_buffer_size(options, write_buffer_size)
_ldb.leveldb_options_set_max_open_files(options, max_open_files)
_ldb.leveldb_options_set_cache(options, cache.ref)
_ldb.leveldb_options_set_block_size(options, block_size)
error = ctypes.POINTER(ctypes.c_char)()
db = _ldb.leveldb_open(options, path, ctypes.byref(error))
_ldb.leveldb_options_destroy(options)
_checkError(error)
db = _PointerRef(db, _ldb.leveldb_close)
filter_policy.addReferrer(db)
cache.addReferrer(db)
return DBInterface(_LevelDBImpl(db, other_objects=(filter_policy, cache)),
allow_close=True, default_sync=default_sync,
default_verify_checksums=default_verify_checksums,
default_fill_cache=default_fill_cache)
class _LevelDBImpl(object):
__slots__ = ["_objs", "_db", "_snapshot"]
def __init__(self, db_ref, snapshot_ref=None, other_objects=()):
self._objs = other_objects
self._db = db_ref
self._snapshot = snapshot_ref
def close(self):
db, self._db = self._db, None
objs, self._objs = self._objs, ()
if db is not None:
db.close()
for obj in objs:
obj.close()
Close = close
def put(self, options, key, val, sync=False):
if self._snapshot is not None:
raise TypeError("cannot put on leveldb snapshot")
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_writeoptions_create()
_ldb.leveldb_writeoptions_set_sync(options, sync)
_ldb.leveldb_put(self._db.ref, options, key, len(key), val, len(val),
ctypes.byref(error))
_ldb.leveldb_writeoptions_destroy(options)
_checkError(error)
Put = put
def delete(self, key, sync=False):
if self._snapshot is not None:
raise TypeError("cannot delete on leveldb snapshot")
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_writeoptions_create()
_ldb.leveldb_writeoptions_set_sync(options, sync)
_ldb.leveldb_delete(self._db.ref, options, key, len(key),
ctypes.byref(error))
_ldb.leveldb_writeoptions_destroy(options)
_checkError(error)
Delete = delete
def Get(self, options, key, verify_checksums=False, fill_cache=True):
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_readoptions_create()
_ldb.leveldb_readoptions_set_verify_checksums(options,
verify_checksums)
_ldb.leveldb_readoptions_set_fill_cache(options, fill_cache)
if self._snapshot is not None:
_ldb.leveldb_readoptions_set_snapshot(options, self._snapshot.ref)
size = ctypes.c_size_t(0)
val_p = _ldb.leveldb_get(self._db.ref, options, key, len(key),
ctypes.byref(size), ctypes.byref(error))
if bool(val_p):
val = ctypes.string_at(val_p, size.value)
_ldb.leveldb_free(ctypes.cast(val_p, ctypes.c_void_p))
else:
val = None
_ldb.leveldb_readoptions_destroy(options)
_checkError(error)
return val
# pylint: disable=W0212
def write(self, options, batch, sync=False):
if self._snapshot is not None:
raise TypeError("cannot delete on leveldb snapshot")
real_batch = _ldb.leveldb_writebatch_create()
for key, val in batch._puts.iteritems():
_ldb.leveldb_writebatch_put(real_batch, key, len(key), val,
len(val))
for key in batch._deletes:
_ldb.leveldb_writebatch_delete(real_batch, key, len(key))
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_writeoptions_create()
_ldb.leveldb_writeoptions_set_sync(options, sync)
_ldb.leveldb_write(self._db.ref, options, real_batch,
ctypes.byref(error))
_ldb.leveldb_writeoptions_destroy(options)
_ldb.leveldb_writebatch_destroy(real_batch)
_checkError(error)
Write = write
def NewIterator(self, options=None, verify_checksums=False, fill_cache=True):
options = _ldb.leveldb_readoptions_create()
if self._snapshot is not None:
_ldb.leveldb_readoptions_set_snapshot(options, self._snapshot.ref)
_ldb.leveldb_readoptions_set_verify_checksums(
options, verify_checksums)
_ldb.leveldb_readoptions_set_fill_cache(options, fill_cache)
it_ref = _PointerRef(
_ldb.leveldb_create_iterator(self._db.ref, options),
_ldb.leveldb_iter_destroy)
_ldb.leveldb_readoptions_destroy(options)
self._db.addReferrer(it_ref)
return _IteratorDbImpl(it_ref)
def approximateDiskSizes(self, *ranges):
if self._snapshot is not None:
raise TypeError("cannot calculate disk sizes on leveldb snapshot")
assert len(ranges) > 0
key_type = ctypes.c_void_p * len(ranges)
len_type = ctypes.c_size_t * len(ranges)
start_keys, start_lens = key_type(), len_type()
end_keys, end_lens = key_type(), len_type()
sizes = (ctypes.c_uint64 * len(ranges))()
for i, range_ in enumerate(ranges):
assert isinstance(range_, tuple) and len(range_) == 2
assert isinstance(range_[0], str) and isinstance(range_[1], str)
start_keys[i] = ctypes.cast(range_[0], ctypes.c_void_p)
end_keys[i] = ctypes.cast(range_[1], ctypes.c_void_p)
start_lens[i], end_lens[i] = len(range_[0]), len(range_[1])
_ldb.leveldb_approximate_sizes(self._db.ref, len(ranges), start_keys,
start_lens, end_keys, end_lens, sizes)
return list(sizes)
ApproximateDiskSizes = approximateDiskSizes
def compactRange(self, start_key, end_key):
assert isinstance(start_key, str) and isinstance(end_key, str)
_ldb.leveldb_compact_range(self._db.ref, start_key, len(start_key),
end_key, len(end_key))
CompactRange = compactRange
def snapshot(self):
snapshot_ref = _PointerRef(
_ldb.leveldb_create_snapshot(self._db.ref),
lambda ref: _ldb.leveldb_release_snapshot(self._db.ref, ref))
self._db.addReferrer(snapshot_ref)
return _LevelDBImpl(self._db, snapshot_ref=snapshot_ref,
other_objects=self._objs)
Snapshot = snapshot
log.debug("MCEdit-Unified internal PE 1+ support initialized.")
| true
| true
|
f7059649cace577ed483d9e7b5ab728bae8e0607
| 12,268
|
py
|
Python
|
VMBackup/main/PluginHost.py
|
jamvar/azure-linux-extensions
|
66610daae2ef09f7920d9c4aa2e99a3035fe76a6
|
[
"Apache-2.0"
] | 2
|
2021-11-02T00:16:29.000Z
|
2022-02-17T12:08:42.000Z
|
VMBackup/main/PluginHost.py
|
jamvar/azure-linux-extensions
|
66610daae2ef09f7920d9c4aa2e99a3035fe76a6
|
[
"Apache-2.0"
] | 3
|
2019-07-29T20:25:09.000Z
|
2019-08-13T00:00:45.000Z
|
VMBackup/main/PluginHost.py
|
ChrisCoe/azure-linux-extensions
|
1ca6fce15eca3ddefc33651b094c9a4b4e52fa31
|
[
"Apache-2.0"
] | 1
|
2017-07-17T18:52:10.000Z
|
2017-07-17T18:52:10.000Z
|
import time
import sys
import os
import threading
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
from common import CommonVariables
from pwd import getpwuid
from stat import *
import traceback
# [pre_post]
# "timeout" : (in seconds),
#
# .... other params ...
#
# "pluginName0" : "oracle_plugin", the python plugin file will have same name
# "pluginPath0" : "/abc/xyz/"
# "pluginConfigPath0" : "sdf/sdf/abcd.json"
#
#
# errorcode policy
# errorcode = 0 (CommonVariables.PrePost_PluginStatus_Success), means success, script runs without error, warnings may be possible
# errorcode = 5 (CommonVariables.PrePost_PluginStatus_Timeout), means timeout
# errorcode = 10 (CommonVariables.PrePost_PluginStatus_ConfigNotFound), config file not found
# errorcode = process return code, means bash script encountered some other error, like 127 for script not found
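# Editor's note (illustrative addition): the comment block above describes the
# expected layout of the plugin host config file. The helper below is a hedged
# sketch that writes such a file with ConfigParser; the plugin name, paths and
# timeout are placeholder values, not something this extension ships with, and
# nothing in this module calls the helper.
def _example_write_plugin_host_conf(target='/tmp/VMSnapshotPluginHost.conf'):
    config = ConfigParsers.ConfigParser()
    config.add_section('pre_post')
    config.set('pre_post', 'timeoutInSeconds', '1200')
    config.set('pre_post', 'numberOfPlugins', '1')
    config.set('pre_post', 'pluginName0', 'oracle_plugin')
    config.set('pre_post', 'pluginPath0', '/abc/xyz/')
    config.set('pre_post', 'pluginConfigPath0', '/abc/xyz/oracle_plugin.json')
    with open(target, 'w') as conf_file:
        config.write(conf_file)
    return target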
class PluginHostError(object):
def __init__(self, errorCode, pluginName):
self.errorCode = errorCode
self.pluginName = pluginName
def __str__(self):
        return 'Plugin :- ' + self.pluginName + ' ErrorCode :- ' + str(self.errorCode)
class PluginHostResult(object):
def __init__(self):
self.errors = []
self.anyScriptFailed = False
self.continueBackup = True
self.errorCode = 0
self.fileCode = []
self.filePath = []
def __str__(self):
errorStr = ''
for error in self.errors:
errorStr += (str(error)) + '\n'
errorStr += 'Final Error Code :- ' + str(self.errorCode) + '\n'
errorStr += 'Any script Failed :- ' + str(self.anyScriptFailed) + '\n'
errorStr += 'Continue Backup :- ' + str(self.continueBackup) + '\n'
return errorStr
class PluginHost(object):
""" description of class """
def __init__(self, logger):
self.logger = logger
self.modulesLoaded = False
self.configLocation = '/etc/azure/VMSnapshotPluginHost.conf'
self.timeoutInSeconds = 1800
self.plugins = []
self.pluginName = []
self.noOfPlugins = 0
self.preScriptCompleted = []
self.preScriptResult = []
self.postScriptCompleted = []
self.postScriptResult = []
def pre_check(self):
self.logger.log('Loading script modules now...',True,'Info')
errorCode = CommonVariables.PrePost_PluginStatus_Success
dobackup = True
fsFreeze_on = True
if not os.path.isfile(self.configLocation):
self.logger.log('Plugin host Config file does not exist in the location ' + self.configLocation, True)
self.configLocation = './main/VMSnapshotPluginHost.conf'
permissions = self.get_permissions(self.configLocation)
if not os.path.isfile(self.configLocation):
self.logger.log('Plugin host Config file does not exist in the location ' + self.configLocation, True)
errorCode =CommonVariables.FailedPrepostPluginhostConfigNotFound
elif not (int(permissions[1]) == 0 or int(permissions[1]) == 4) or not (int(permissions[2]) == 0 or int(permissions[2]) == 4):
self.logger.log('Plugin host Config file does not have desired permissions', True, 'Error')
errorCode = CommonVariables.FailedPrepostPluginhostConfigPermissionError
elif not self.find_owner(self.configLocation) == 'root':
self.logger.log('The owner of the Plugin host Config file ' + self.configLocation + ' is ' + self.find_owner(self.configLocation) + ' but not root', True, 'Error')
errorCode = CommonVariables.FailedPrepostPluginhostConfigPermissionError
else :
errorCode,dobackup,fsFreeze_on = self.load_modules()
return errorCode,dobackup,fsFreeze_on
def load_modules(self):
        # Imports all plugin modules using the information in the plugin host config file
        # and initializes basic class variables associated with each plugin
len = 0
errorCode = CommonVariables.PrePost_PluginStatus_Success
dobackup = True
fsFreeze_on = True
try:
self.logger.log('config file: '+str(self.configLocation),True,'Info')
config = ConfigParsers.ConfigParser()
config.read(self.configLocation)
if (config.has_option('pre_post', 'timeoutInSeconds')):
self.timeoutInSeconds = min(int(config.get('pre_post','timeoutInSeconds')),self.timeoutInSeconds)
if (config.has_option('pre_post', 'numberOfPlugins')):
len = int(config.get('pre_post','numberOfPlugins'))
self.logger.log('timeoutInSeconds: '+str(self.timeoutInSeconds),True,'Info')
self.logger.log('numberOfPlugins: '+str(len),True,'Info')
while len > 0:
pname = config.get('pre_post','pluginName'+str(self.noOfPlugins))
ppath = config.get('pre_post','pluginPath'+str(self.noOfPlugins))
pcpath = config.get('pre_post','pluginConfigPath'+str(self.noOfPlugins))
self.logger.log('Name of the Plugin is ' + pname, True)
self.logger.log('Plugin config path is ' + pcpath, True)
errorCode = CommonVariables.PrePost_PluginStatus_Success
dobackup = True
if os.path.isfile(pcpath):
permissions = self.get_permissions(pcpath)
if (int(permissions[0]) %2 == 1) or int(permissions[1]) > 0 or int(permissions[2]) > 0:
self.logger.log('Plugin Config file does not have desired permissions', True, 'Error')
errorCode = CommonVariables.FailedPrepostPluginConfigPermissionError
if not self.find_owner(pcpath) == 'root':
self.logger.log('The owner of the Plugin Config file ' + pcpath + ' is ' + self.find_owner(pcpath) + ' but not root', True, 'Error')
errorCode = CommonVariables.FailedPrepostPluginConfigPermissionError
else:
                    self.logger.log('Plugin config file does not exist in the location ' + pcpath, True, 'Error')
errorCode = CommonVariables.FailedPrepostPluginConfigNotFound
if(errorCode == CommonVariables.PrePost_PluginStatus_Success):
sys.path.append(ppath)
plugin = __import__(pname)
self.plugins.append(plugin.ScriptRunner(logger=self.logger,name=pname,configPath=pcpath,maxTimeOut=self.timeoutInSeconds))
errorCode,dobackup,fsFreeze_on = self.plugins[self.noOfPlugins].validate_scripts()
self.noOfPlugins = self.noOfPlugins + 1
self.pluginName.append(pname)
self.preScriptCompleted.append(False)
self.preScriptResult.append(None)
self.postScriptCompleted.append(False)
self.postScriptResult.append(None)
len = len - 1
if self.noOfPlugins != 0:
self.modulesLoaded = True
except Exception as err:
errMsg = 'Error in reading PluginHost config file : %s, stack trace: %s' % (str(err), traceback.format_exc())
self.logger.log(errMsg, True, 'Error')
errorCode = CommonVariables.FailedPrepostPluginhostConfigParsing
return errorCode,dobackup,fsFreeze_on
def find_owner(self, filename):
file_owner = ''
try:
file_owner = getpwuid(os.stat(filename).st_uid).pw_name
except Exception as err:
errMsg = 'Error in fetching owner of the file : ' + filename + ': %s, stack trace: %s' % (str(err), traceback.format_exc())
self.logger.log(errMsg, True, 'Error')
return file_owner
def get_permissions(self, filename):
permissions = '777'
try:
permissions = oct(os.stat(filename)[ST_MODE])[-3:]
            self.logger.log('Permissions of the file ' + filename + ' are ' + permissions,True)
except Exception as err:
errMsg = 'Error in fetching permissions of the file : ' + filename + ': %s, stack trace: %s' % (str(err), traceback.format_exc())
self.logger.log(errMsg, True, 'Error')
return permissions
def pre_script(self):
# Runs pre_script() for all plugins and maintains a timer
result = PluginHostResult()
curr = 0
for plugin in self.plugins:
t1 = threading.Thread(target=plugin.pre_script, args=(curr, self.preScriptCompleted, self.preScriptResult))
t1.start()
curr = curr + 1
flag = True
        for i in range(0, ((self.timeoutInSeconds) // 5) + 2): # poll every 5 seconds; the 2 extra iterations wait ~10 more seconds to avoid a race between the host and the script timing out
time.sleep(5)
flag = True
for j in range(0,self.noOfPlugins):
flag = flag & self.preScriptCompleted[j]
if flag:
break
continueBackup = True
#Plugin timed out
if not flag:
ecode = CommonVariables.FailedPrepostPluginhostPreTimeout
result.anyScriptFailed = True
presult = PluginHostError(errorCode = ecode, pluginName = self.pluginName[j])
result.errors.append(presult)
else:
for j in range(0,self.noOfPlugins):
ecode = CommonVariables.FailedPrepostPluginhostPreTimeout
continueBackup = continueBackup & self.preScriptResult[j].continueBackup
if self.preScriptCompleted[j]:
ecode = self.preScriptResult[j].errorCode
if ecode != CommonVariables.PrePost_PluginStatus_Success:
result.anyScriptFailed = True
presult = PluginHostError(errorCode = ecode, pluginName = self.pluginName[j])
result.errors.append(presult)
result.continueBackup = continueBackup
self.logger.log('Finished prescript execution from PluginHost side. Continue Backup: '+str(continueBackup),True,'Info')
return result
def post_script(self):
# Runs post_script() for all plugins and maintains a timer
result = PluginHostResult()
if not self.modulesLoaded:
return result
self.logger.log('Starting postscript for all modules.',True,'Info')
curr = 0
for plugin in self.plugins:
t1 = threading.Thread(target=plugin.post_script, args=(curr, self.postScriptCompleted, self.postScriptResult))
t1.start()
curr = curr + 1
flag = True
        for i in range(0, ((self.timeoutInSeconds) // 5) + 2): # poll every 5 seconds; the 2 extra iterations wait ~10 more seconds to avoid a race between the host and the script timing out
time.sleep(5)
flag = True
for j in range(0,self.noOfPlugins):
flag = flag & self.postScriptCompleted[j]
if flag:
break
continueBackup = True
#Plugin timed out
if not flag:
ecode = CommonVariables.FailedPrepostPluginhostPostTimeout
result.anyScriptFailed = True
presult = PluginHostError(errorCode = ecode, pluginName = self.pluginName[j])
result.errors.append(presult)
else:
for j in range(0,self.noOfPlugins):
ecode = CommonVariables.FailedPrepostPluginhostPostTimeout
continueBackup = continueBackup & self.postScriptResult[j].continueBackup
if self.postScriptCompleted[j]:
ecode = self.postScriptResult[j].errorCode
if ecode != CommonVariables.PrePost_PluginStatus_Success:
result.anyScriptFailed = True
presult = PluginHostError(errorCode = ecode, pluginName = self.pluginName[j])
result.errors.append(presult)
result.continueBackup = continueBackup
self.logger.log('Finished postscript execution from PluginHost side. Continue Backup: '+str(continueBackup),True,'Info')
return result
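
# A minimal usage sketch (illustration only, not part of the original module): roughly how
# a caller could drive PluginHost around a snapshot. The stdout logger below is a made-up
# stand-in for the extension's backup logger; it only needs the log(msg, local, level)
# call shape used throughout the class above.
if __name__ == '__main__':
    class _StdoutLogger(object):
        def log(self, msg, local=False, level='Info'):
            # Mirror the (message, log-locally, level) call shape PluginHost expects.
            print('[' + level + '] ' + str(msg))

    pluginHost = PluginHost(logger=_StdoutLogger())
    errorCode, dobackup, fsFreeze_on = pluginHost.pre_check()
    if errorCode == CommonVariables.PrePost_PluginStatus_Success and dobackup:
        preResult = pluginHost.pre_script()
        if preResult.continueBackup:
            pass  # the snapshot itself would be taken here, honoring fsFreeze_on
        postResult = pluginHost.post_script()
        print(postResult)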
| 43.814286
| 175
| 0.623329
|
| true
| true
|