hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f726a1bf8f792d5b4f9b9594c9f703b8b87a1151 | 1,391 | py | Python | geniza/footnotes/migrations/0015_add_footnote_location_pp.py | kmcelwee/geniza | 0e59134e35357d4f80d85bf1e423edbc29d1edfb | [
"Apache-2.0"
] | null | null | null | geniza/footnotes/migrations/0015_add_footnote_location_pp.py | kmcelwee/geniza | 0e59134e35357d4f80d85bf1e423edbc29d1edfb | [
"Apache-2.0"
] | 5 | 2020-09-22T17:35:24.000Z | 2020-09-22T19:45:46.000Z | geniza/footnotes/migrations/0015_add_footnote_location_pp.py | kmcelwee/geniza | 0e59134e35357d4f80d85bf1e423edbc29d1edfb | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.6 on 2021-12-15 21:28
from django.db import migrations
from django.db.models import F, Value
from django.db.models.functions import Concat
def add_pp_to_footnote_pages(apps, schema_editor):
# for footnotes that start with with a numeric location,
# we want to add pp. to make the meaning clearer
# on the front end
Footnote = apps.get_model("footnotes", "Footnote")
# find and update footnotes to based on location contents
# first, find footnotes with purely numeric location (i.e., single page number)
# prefix with p.
Footnote.objects.filter(location__regex=r"^\d+$").update(
location=Concat(Value("p. "), F("location"))
)
# next, find footnotes that start with numeric values
# - exclude location that starts with numeric followed by a hebrew letter
# (currently only one, 49ב) — this is a document location, not a page number
# - find all other footnotes with locations that start with a number
Footnote.objects.exclude(location__regex=r"^\d+[\u0590-\u05fe]").filter(
location__regex=r"^\d"
).update(location=Concat(Value("pp. "), F("location")))
class Migration(migrations.Migration):
dependencies = [
("footnotes", "0014_alter_source_edition"),
]
operations = [
migrations.RunPython(add_pp_to_footnote_pages, migrations.RunPython.noop)
]
| 34.775 | 83 | 0.700216 |
from django.db import migrations
from django.db.models import F, Value
from django.db.models.functions import Concat
def add_pp_to_footnote_pages(apps, schema_editor):
Footnote = apps.get_model("footnotes", "Footnote")
Footnote.objects.filter(location__regex=r"^\d+$").update(
location=Concat(Value("p. "), F("location"))
)
Footnote.objects.exclude(location__regex=r"^\d+[\u0590-\u05fe]").filter(
location__regex=r"^\d"
).update(location=Concat(Value("pp. "), F("location")))
class Migration(migrations.Migration):
dependencies = [
("footnotes", "0014_alter_source_edition"),
]
operations = [
migrations.RunPython(add_pp_to_footnote_pages, migrations.RunPython.noop)
]
| true | true |
f726a26cfd9c320455025cb39ec2e30c2b3335a0 | 76,817 | py | Python | spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py | dan123456-eng/spyder | e57751e01d09a35b8f0583f9efd8dce318b17b4e | [
"MIT"
] | 1 | 2022-02-23T16:50:02.000Z | 2022-02-23T16:50:02.000Z | spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py | dan123456-eng/spyder | e57751e01d09a35b8f0583f9efd8dce318b17b4e | [
"MIT"
] | null | null | null | spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py | dan123456-eng/spyder | e57751e01d09a35b8f0583f9efd8dce318b17b4e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the IPython console plugin.
"""
# Standard library imports
import codecs
import glob
import os
import os.path as osp
import psutil
import shutil
import sys
import tempfile
from textwrap import dedent
import threading
import traceback
from unittest.mock import Mock
# Third party imports
import IPython
from IPython.core import release as ipy_release
from IPython.core.application import get_ipython_dir
from flaky import flaky
from pkg_resources import parse_version
from pygments.token import Name
import pytest
from qtpy import PYQT5
from qtpy.QtCore import Qt
from qtpy.QtWebEngineWidgets import WEBENGINE
from qtpy.QtWidgets import QMessageBox, QMainWindow
import sympy
# Local imports
from spyder.config.base import get_home_dir, running_in_ci
from spyder.config.gui import get_color_scheme
from spyder.config.manager import ConfigurationManager
from spyder.py3compat import PY2, to_text_string
from spyder.plugins.help.tests.test_plugin import check_text
from spyder.plugins.help.utils.sphinxify import CSS_PATH
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
from spyder.plugins.ipythonconsole.utils.style import create_style_class
from spyder.plugins.ipythonconsole.widgets import ClientWidget
from spyder.utils.programs import get_temp_dir
from spyder.utils.conda import is_conda_env
# =============================================================================
# Constants
# =============================================================================
# Maximum time (ms) to wait for a shell prompt or teardown condition.
SHELL_TIMEOUT = 20000
# System temp dir used to create fixture files and directories.
TEMP_DIRECTORY = tempfile.gettempdir()
# Directory with non-latin characters, used by `non_ascii_dir` tests.
NON_ASCII_DIR = osp.join(TEMP_DIRECTORY, u'測試', u'اختبار')
# Name of the fixed working directory used by `use_startup_wdir` tests.
NEW_DIR = 'new_workingdir'
# =============================================================================
# Utillity Functions
# =============================================================================
def get_console_font_color(syntax_style):
    """Return the color assigned to plain names by a console syntax style."""
    style_class = create_style_class(syntax_style)
    return style_class.styles[Name]
def get_console_background_color(style_sheet):
    """Extract the first background-color value from a Qt style sheet."""
    after_property = style_sheet.split('background-color:')[1]
    # Everything up to the first semicolon is the color value.
    return after_property.split(';', 1)[0]
def get_conda_test_env(test_env_name=u'spytest-ž'):
"""Return the full prefix path of the given `test_env_name`."""
if 'envs' in sys.prefix:
root_prefix = os.path.dirname(os.path.dirname(sys.prefix))
else:
root_prefix = sys.prefix
test_env_prefix = os.path.join(root_prefix, 'envs', test_env_name)
if os.name == 'nt':
test_env_executable = os.path.join(test_env_prefix, 'python.exe')
else:
test_env_executable = os.path.join(test_env_prefix, 'bin', 'python')
return test_env_executable
# =============================================================================
# Qt Test Fixtures
# =============================================================================
@pytest.fixture
def ipyconsole(qtbot, request, tmpdir):
    """IPython console fixture.

    Builds a standalone IPythonConsole plugin inside a minimal mocked main
    window, configures it from the markers present on the requesting test
    (graphics backend, interpreter, client flavor, working directory,
    stderr file), yields the plugin, then on teardown closes everything and
    checks for thread/file/subprocess leaks (skipped on Windows or when the
    test is marked `known_leak`).
    """
    # Isolated configuration so tests don't touch the user's settings.
    configuration = ConfigurationManager(conf_path=str(tmpdir))

    class MainWindowMock(QMainWindow):
        # Minimal stand-in for Spyder's main window: only the attributes
        # the console plugin actually touches are given real values; any
        # other attribute access returns a Mock.
        def get_spyder_pythonpath(self):
            return configuration.get('main', 'spyder_pythonpath', [])

        def __getattr__(self, attr):
            if attr == 'consoles_menu_actions':
                return []
            elif attr == 'editor':
                return None
            else:
                return Mock()

    # Tests assume inline backend
    configuration.set('ipython_console', 'pylab/backend', 0)

    # Start in a new working directory the console
    use_startup_wdir = request.node.get_closest_marker('use_startup_wdir')
    if use_startup_wdir:
        new_wdir = osp.join(os.getcwd(), NEW_DIR)
        if not osp.exists(new_wdir):
            os.mkdir(new_wdir)
        configuration.set('workingdir', 'console/use_fixed_directory', True)
        configuration.set('workingdir', 'console/fixed_directory', new_wdir)
    else:
        configuration.set('workingdir', 'console/use_fixed_directory', False)
        configuration.set(
            'workingdir', 'console/fixed_directory', get_home_dir())

    # Test the console with a non-ascii temp dir
    non_ascii_dir = request.node.get_closest_marker('non_ascii_dir')
    if non_ascii_dir:
        test_dir = NON_ASCII_DIR
    else:
        test_dir = ''

    # Instruct the console to not use a stderr file
    no_stderr_file = request.node.get_closest_marker('no_stderr_file')
    if no_stderr_file:
        test_no_stderr = 'True'
    else:
        test_no_stderr = ''

    # Use the automatic backend if requested
    auto_backend = request.node.get_closest_marker('auto_backend')
    if auto_backend:
        configuration.set('ipython_console', 'pylab/backend', 1)

    # Use the Tkinter backend if requested
    tk_backend = request.node.get_closest_marker('tk_backend')
    if tk_backend:
        configuration.set('ipython_console', 'pylab/backend', 8)

    # Start a Pylab client if requested
    pylab_client = request.node.get_closest_marker('pylab_client')
    is_pylab = True if pylab_client else False

    # Start a Sympy client if requested
    sympy_client = request.node.get_closest_marker('sympy_client')
    is_sympy = True if sympy_client else False

    # Start a Cython client if requested
    cython_client = request.node.get_closest_marker('cython_client')
    is_cython = True if cython_client else False

    # Use an external interpreter if requested
    external_interpreter = request.node.get_closest_marker(
        'external_interpreter')
    if external_interpreter:
        configuration.set('main_interpreter', 'default', False)
        configuration.set('main_interpreter', 'executable', sys.executable)
    else:
        configuration.set('main_interpreter', 'default', True)
        configuration.set('main_interpreter', 'executable', '')

    # Use the test environment interpreter if requested
    test_environment_interpreter = request.node.get_closest_marker(
        'test_environment_interpreter')
    if test_environment_interpreter:
        configuration.set('main_interpreter', 'default', False)
        configuration.set(
            'main_interpreter', 'executable', get_conda_test_env())
    else:
        configuration.set('main_interpreter', 'default', True)
        configuration.set('main_interpreter', 'executable', '')

    # Conf css_path in the Appeareance plugin
    configuration.set('appearance', 'css_path', CSS_PATH)

    # Create the console and a new client and set environment
    # (the env vars below are read by the kernel spec at startup).
    os.environ['IPYCONSOLE_TESTING'] = 'True'
    os.environ['IPYCONSOLE_TEST_DIR'] = test_dir
    os.environ['IPYCONSOLE_TEST_NO_STDERR'] = test_no_stderr
    window = MainWindowMock()
    console = IPythonConsole(parent=window, configuration=configuration)
    console._register()
    console.create_new_client(is_pylab=is_pylab,
                              is_sympy=is_sympy,
                              is_cython=is_cython)
    window.setCentralWidget(console.get_widget())

    # Set exclamation mark to True
    configuration.set('ipython_console', 'pdb_use_exclamation_mark', True)

    # This segfaults on macOS
    if not sys.platform == "darwin":
        qtbot.addWidget(window)
        window.resize(640, 480)
        window.show()

    # Wait until the window is fully up
    shell = console.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Check for thread or open file leaks: snapshot the baseline before
    # the test body runs so teardown can diff against it.
    known_leak = request.node.get_closest_marker('known_leak')

    if os.name != 'nt' and not known_leak:
        # _DummyThread are created if current_thread() is called from them.
        # They will always leak (From python doc) so we ignore them.
        init_threads = [
            repr(thread) for thread in threading.enumerate()
            if not isinstance(thread, threading._DummyThread)]
        proc = psutil.Process()
        init_files = [repr(f) for f in proc.open_files()]
        init_subprocesses = [repr(f) for f in proc.children()]

    yield console

    # --- Teardown starts here ---
    # Print shell content if failed
    if request.node.rep_setup.passed:
        if request.node.rep_call.failed:
            # Print content of shellwidget and close window
            print(console.get_current_shellwidget(
                )._control.toPlainText())
            client = console.get_current_client()
            if client.info_page != client.blank_page:
                print('info_page')
                print(client.info_page)

    # Close
    console.on_close()
    window.close()
    os.environ.pop('IPYCONSOLE_TESTING')
    os.environ.pop('IPYCONSOLE_TEST_DIR')
    os.environ.pop('IPYCONSOLE_TEST_NO_STDERR')

    if os.name == 'nt' or known_leak:
        # Do not test for leaks
        return

    def show_diff(init_list, now_list, name):
        # Print the symmetric difference between baseline and current
        # resource lists to stderr for debugging leak failures.
        sys.stderr.write(f"Extra {name} before test:\n")
        for item in init_list:
            if item in now_list:
                now_list.remove(item)
            else:
                sys.stderr.write(item + "\n")
        sys.stderr.write(f"Extra {name} after test:\n")
        for item in now_list:
            sys.stderr.write(item + "\n")

    # The test is not allowed to open new files or threads.
    try:
        def threads_condition():
            threads = [
                thread for thread in threading.enumerate()
                if not isinstance(thread, threading._DummyThread)]
            return (len(init_threads) >= len(threads))

        qtbot.waitUntil(threads_condition, timeout=SHELL_TIMEOUT)
    except Exception:
        now_threads = [
            thread for thread in threading.enumerate()
            if not isinstance(thread, threading._DummyThread)]
        threads = [repr(t) for t in now_threads]
        show_diff(init_threads, threads, "thread")
        sys.stderr.write("Running Threads stacks:\n")
        now_thread_ids = [t.ident for t in now_threads]
        for threadId, frame in sys._current_frames().items():
            if threadId in now_thread_ids:
                sys.stderr.write("\nThread " + str(threads) + ":\n")
                traceback.print_stack(frame)
        raise

    try:
        # -1 from closed client
        qtbot.waitUntil(lambda: (
            len(init_subprocesses) - 1 >= len(proc.children())),
            timeout=SHELL_TIMEOUT)
    except Exception:
        subprocesses = [repr(f) for f in proc.children()]
        show_diff(init_subprocesses, subprocesses, "processes")
        raise

    try:
        qtbot.waitUntil(
            lambda: (len(init_files) >= len(proc.open_files())),
            timeout=SHELL_TIMEOUT)
    except Exception:
        files = [repr(f) for f in proc.open_files()]
        show_diff(init_files, files, "files")
        raise
# =============================================================================
# Tests
# =============================================================================
@flaky(max_runs=3)
@pytest.mark.external_interpreter
def test_banners(ipyconsole, qtbot):
    """Check both the long and the short console banners."""
    shell = ipyconsole.get_current_shellwidget()

    # --- Long banner, as rendered in the control widget.
    lines = shell._control.toPlainText().splitlines()
    if "Update LANGUAGE_CODES" in lines[0]:
        # Drop the translations warning shown in dev setups.
        lines = lines[1:]
    while not lines[0].strip():
        lines = lines[1:]

    full_py_ver = sys.version.splitlines()[0].strip()
    assert full_py_ver in lines[0]           # Python version comes first
    assert 'license' in lines[1]             # then the license mention
    assert lines[2] == ''                    # then a blank separator
    assert ipy_release.version in lines[3]   # then the IPython version

    # --- Short banner, built programmatically.
    short_py_ver = sys.version.split(' ')[0]
    assert shell.short_banner() == 'Python %s -- IPython %s' % (
        short_py_ver, ipy_release.version)
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "function,signature,documentation",
    [("arange",
      ["start", "stop"],
      ["Return evenly spaced values within a given interval.<br>",
       "<br>Python built-in `range` function, but returns an ndarray ..."]),
     ("vectorize",
      ["pyfunc", "otype", "signature"],
      ["Generalized function class.<br>",
       "Define a vectorized function which takes a nested sequence ..."]),
     ("absolute",
      ["x", "/", "out"],
      ["Parameters<br>", "x : array_like ..."])]
    )
@pytest.mark.skipif(not os.name == 'nt',
                    reason="Times out on macOS and fails on Linux")
def test_get_calltips(ipyconsole, qtbot, function, signature, documentation):
    """Test that calltips show the documentation.

    Parametrized over three numpy callables: typing `np.<function>(` in the
    console must pop up a calltip containing the expected signature parts
    and documentation fragments.
    """
    shell = ipyconsole.get_current_shellwidget()
    control = shell._control

    # Import numpy
    with qtbot.waitSignal(shell.executed):
        shell.execute('import numpy as np')

    # Write an object in the console that should generate a calltip
    # and wait for the kernel to send its response.
    with qtbot.waitSignal(shell.kernel_client.shell_channel.message_received):
        qtbot.keyClicks(control, 'np.' + function + '(')

    # Wait a little bit for the calltip to appear
    qtbot.waitUntil(lambda: control.calltip_widget.isVisible())

    # Assert we displayed a calltip
    assert control.calltip_widget.isVisible()

    # Hide the calltip to avoid focus problems on Linux
    control.calltip_widget.hide()

    # Check expected elements for signature and documentation
    for element in signature:
        assert element in control.calltip_widget.text()
    for element in documentation:
        assert element in control.calltip_widget.text()
@flaky(max_runs=3)
@pytest.mark.auto_backend
@pytest.mark.skipif(
    running_in_ci() and not os.name == 'nt',
    reason="Times out on Linux and macOS")
def test_auto_backend(ipyconsole, qtbot):
    """Check that the automatic graphics backend resolves to Qt5."""
    shell = ipyconsole.get_current_shellwidget()

    # Ask the kernel which eventloop it ended up with.
    with qtbot.waitSignal(shell.executed):
        shell.execute("ip = get_ipython(); ip.kernel.eventloop")

    console_text = ipyconsole.get_widget().get_focus_widget().toPlainText()

    # No warnings or errors may have been printed, and the Qt5 eventloop
    # must be the one in use.
    assert 'NOTE' not in console_text
    assert 'Error' not in console_text
    assert 'loop_qt5' in console_text
@flaky(max_runs=3)
@pytest.mark.tk_backend
@pytest.mark.skipif(
    running_in_ci() and not os.name == 'nt',
    reason="Times out on Linux and macOS")
def test_tk_backend(ipyconsole, qtbot):
    """Check that the Tkinter graphics backend is in use."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Query the kernel for its eventloop and verify it is Tk's.
    with qtbot.waitSignal(shell.executed):
        shell.execute("ip = get_ipython(); ip.kernel.eventloop")
    focus_widget = ipyconsole.get_widget().get_focus_widget()
    assert 'loop_tk' in focus_widget.toPlainText()
@flaky(max_runs=3)
@pytest.mark.pylab_client
def test_pylab_client(ipyconsole, qtbot):
    """Check a Pylab console exposes numpy's namespace, also after reset."""
    shell = ipyconsole.get_current_shellwidget()
    focus_widget = ipyconsole.get_widget().get_focus_widget()

    # `e` is provided by the pylab import; evaluating it must not raise.
    with qtbot.waitSignal(shell.executed):
        shell.execute("e")
    assert 'Error' not in focus_widget.toPlainText()

    # After a namespace reset, `e` must be redefined automatically.
    shell.reset_namespace()
    qtbot.wait(1000)
    with qtbot.waitSignal(shell.executed):
        shell.execute("e")
    assert 'Error' not in focus_widget.toPlainText()
@flaky(max_runs=3)
@pytest.mark.sympy_client
@pytest.mark.xfail('1.0' < sympy.__version__ < '1.2',
                   reason="A bug with sympy 1.1.1 and IPython-Qtconsole")
def test_sympy_client(ipyconsole, qtbot):
    """Check a SymPy console defines symbolic names, also after reset."""
    shell = ipyconsole.get_current_shellwidget()
    focus_widget = ipyconsole.get_widget().get_focus_widget()

    # `x` is a symbol predefined by the sympy setup; it must resolve.
    with qtbot.waitSignal(shell.executed):
        shell.execute("x")
    assert 'NameError' not in focus_widget.toPlainText()

    # After a namespace reset, `x` must still be defined.
    shell.reset_namespace()
    qtbot.wait(1000)
    with qtbot.waitSignal(shell.executed):
        shell.execute("x")
    assert 'NameError' not in focus_widget.toPlainText()
@flaky(max_runs=3)
@pytest.mark.cython_client
@pytest.mark.skipif(
    (not sys.platform.startswith('linux') or
     parse_version(ipy_release.version) == parse_version('7.11.0')),
    reason="It only works reliably on Linux and fails for IPython 7.11.0")
def test_cython_client(ipyconsole, qtbot):
    """Check that the %%cython magic works, also after a namespace reset."""
    shell = ipyconsole.get_current_shellwidget()

    def compile_cython_and_check():
        # Compiling a trivial cdef function must not leave a traceback
        # in the console.
        with qtbot.waitSignal(shell.executed, timeout=SHELL_TIMEOUT):
            shell.execute("%%cython\n"
                          "cdef int ctest(int x, int y):\n"
                          "    return x + y")
        control = ipyconsole.get_widget().get_focus_widget()
        assert 'Error' not in control.toPlainText()

    compile_cython_and_check()

    # The magic must survive a namespace reset.
    shell.reset_namespace()
    qtbot.wait(1000)
    compile_cython_and_check()
@flaky(max_runs=3)
def test_tab_rename_for_slaves(ipyconsole, qtbot):
    """Check that a slave client follows its master's tab name."""
    # Connect a second (slave) client to the current kernel.
    connection_file = ipyconsole.get_current_client().connection_file
    ipyconsole.get_widget()._create_client_for_kernel(
        connection_file, None, None, None)
    qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)

    # Renaming the tabs must rename master and slave alike.
    ipyconsole.get_widget().rename_tabs_after_change('foo')
    master, slave = ipyconsole.get_clients()
    assert 'foo' in master.get_name()
    assert 'foo' in slave.get_name()
@flaky(max_runs=3)
def test_no_repeated_tabs_name(ipyconsole, qtbot):
    """Check that two clients can't end up with the same tab name."""
    widget = ipyconsole.get_widget()
    widget.rename_tabs_after_change('foo')

    # A second client renamed to the same string must keep a
    # distinguishing counter in its name.
    ipyconsole.create_new_client()
    widget.rename_tabs_after_change('foo')
    assert '2' in ipyconsole.get_current_client().get_name()
@flaky(max_runs=3)
@pytest.mark.skipif(
    running_in_ci() and sys.platform == 'darwin',
    reason="Hangs sometimes on macOS")
def test_tabs_preserve_name_after_move(ipyconsole, qtbot):
    """Check that moving a tab doesn't change its name."""
    ipyconsole.create_new_client()

    # Swap the two tabs: the second client (whose name carries a '2')
    # must now occupy the first position, name intact.
    ipyconsole.get_widget().tabwidget.tabBar().moveTab(0, 1)
    first_client = ipyconsole.get_clients()[0]
    assert '2' in first_client.get_name()
@flaky(max_runs=3)
def test_conf_env_vars(ipyconsole, qtbot):
    """Check that our kernel spec passes env vars down to the kernel."""
    shell = ipyconsole.get_current_shellwidget()

    # Read back, inside the kernel, an env var our kernel spec sets
    # from a CONF option.
    with qtbot.waitSignal(shell.executed):
        shell.execute("import os; a = os.environ.get('SPY_SYMPY_O')")
    assert shell.get_value('a') == 'False'
@flaky(max_runs=3)
@pytest.mark.no_stderr_file
def test_no_stderr_file(ipyconsole, qtbot):
    """Check a console still works when launched without an stderr file."""
    shell = ipyconsole.get_current_shellwidget()

    # A trivial assignment must round-trip through the kernel.
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 1')
    assert shell.get_value('a') == 1
@pytest.mark.non_ascii_dir
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows")
def test_non_ascii_stderr_file(ipyconsole, qtbot):
    """Check consoles whose stderr file lives in a non-ascii directory."""
    shell = ipyconsole.get_current_shellwidget()

    # The kernel must come up and evaluate code normally.
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 1')
    assert shell.get_value('a') == 1
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 and sys.platform == 'darwin',
                    reason="It hangs frequently on Python 2.7 and macOS")
def test_console_import_namespace(ipyconsole, qtbot):
    """Check that star imports populate the kernel namespace."""
    shell = ipyconsole.get_current_shellwidget()

    # After `from numpy import *`, numpy's `e` must be retrievable.
    with qtbot.waitSignal(shell.executed):
        shell.execute('from numpy import *')
    assert shell.get_value('e') == 2.718281828459045
@flaky(max_runs=3)
def test_console_disambiguation(ipyconsole, qtbot):
    """Test the disambiguation of dedicated consoles.

    Two files with the same basename (c.py) in different directories must
    get tab names that include the parent directory to tell them apart.
    """
    def _create_empty_file(directory, basename):
        # Create `directory` (and parents) plus an empty file inside it,
        # returning the file's full path. Uses a context manager so the
        # handle is always closed (the original leaked it on Windows
        # until GC ran).
        filename = osp.join(directory, basename)
        if not osp.isdir(directory):
            os.makedirs(directory)
        if not osp.isfile(filename):
            with open(filename, 'w+'):
                pass
        return filename

    # TEMP_DIRECTORY/a/b/c.py and TEMP_DIRECTORY/a/d/c.py
    filename_b = _create_empty_file(osp.join(TEMP_DIRECTORY, 'a', 'b'), 'c.py')
    filename_d = _create_empty_file(osp.join(TEMP_DIRECTORY, 'a', 'd'), 'c.py')

    # First dedicated console: no ambiguity yet, so no directory in name.
    ipyconsole.create_client_for_file(filename_b)
    client = ipyconsole.get_current_client()
    assert client.get_name() == 'c.py/A'

    # Second dedicated console: both tabs must now show their parent dir.
    ipyconsole.create_client_for_file(filename_d)
    client = ipyconsole.get_current_client()
    assert client.get_name() == 'c.py - d/A'

    ipyconsole.get_widget().tabwidget.setCurrentIndex(1)
    client = ipyconsole.get_current_client()
    assert client.get_name() == 'c.py - b/A'
@flaky(max_runs=3)
def test_console_coloring(ipyconsole, qtbot):
    """Check the console uses the same color scheme as the Editor."""
    # Colors actually applied to the console widget.
    config_options = ipyconsole.get_widget().config_options()
    console_font_color = get_console_font_color(
        config_options.JupyterWidget.syntax_style)
    console_background_color = get_console_background_color(
        config_options.JupyterWidget.style_sheet)

    # Colors the Editor would use for the currently selected scheme.
    selected_color_scheme = ipyconsole.get_conf(
        'selected', section='appearance')
    color_scheme = get_color_scheme(selected_color_scheme)
    editor_background_color = color_scheme['background']
    editor_font_color = color_scheme['normal'][0]

    def normalize(color):
        # Drop quoting and surrounding whitespace before comparing.
        return color.replace("'", "").strip()

    assert normalize(console_background_color) == \
        normalize(editor_background_color)
    assert normalize(console_font_color) == normalize(editor_font_color)
@flaky(max_runs=3)
def test_set_cwd(ipyconsole, qtbot, tmpdir):
    """Check that `set_cwd` actually changes the kernel's cwd."""
    shell = ipyconsole.get_current_shellwidget()

    # Point the kernel at a fresh directory whose name contains a quote
    # (see spyder-ide/spyder#6451).
    original_cwd = shell._cwd
    new_cwd = to_text_string(tmpdir.mkdir("queen's"))
    shell.set_cwd(new_cwd)

    # Ask the kernel where it is and compare.
    with qtbot.waitSignal(shell.executed):
        shell.execute("import os; cwd = os.getcwd()")
    assert shell.get_value('cwd') == new_cwd

    # Leave things as we found them.
    shell.set_cwd(original_cwd)
@flaky(max_runs=3)
def test_get_cwd(ipyconsole, qtbot, tmpdir):
    """Check that the shell widget tracks the kernel's cwd."""
    shell = ipyconsole.get_current_shellwidget()

    # Fresh directory (with a quote in its name, see
    # spyder-ide/spyder#6451) the kernel isn't in yet.
    original_cwd = shell._cwd
    new_cwd = to_text_string(tmpdir.mkdir("queen's"))
    assert shell._cwd != new_cwd

    # Backslashes must be escaped inside the executed code string.
    if os.name == 'nt':
        new_cwd = new_cwd.replace(u"\\", u"\\\\")

    # Change directory inside the kernel, then ask the widget to sync.
    with qtbot.waitSignal(shell.executed):
        shell.execute(u"import os; os.chdir(u'''{}''')".format(new_cwd))
    with qtbot.waitSignal(shell.sig_working_directory_changed):
        shell.update_cwd()

    # Undo the escaping before comparing with the widget's cwd.
    if os.name == 'nt':
        new_cwd = new_cwd.replace(u"\\\\", u"\\")
    assert shell._cwd == new_cwd
    shell.set_cwd(original_cwd)
@flaky(max_runs=3)
def test_request_env(ipyconsole, qtbot):
    """Check retrieval of the kernel's environment variables."""
    shell = ipyconsole.get_current_shellwidget()

    # Plant a marker variable in the kernel's environment.
    with qtbot.waitSignal(shell.executed):
        shell.execute("import os; os.environ['FOO'] = 'bar'")

    # The env dict delivered through the signal must contain it.
    with qtbot.waitSignal(shell.sig_show_env) as blocker:
        shell.request_env()
    assert blocker.args[0]['FOO'] == 'bar'
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt',
                    reason="Fails due to differences in path handling")
def test_request_syspath(ipyconsole, qtbot, tmpdir):
    """Check retrieval of the kernel's sys.path contents."""
    shell = ipyconsole.get_current_shellwidget()

    # Append a temporary dir to the kernel's sys.path.
    tmp_dir = to_text_string(tmpdir)
    with qtbot.waitSignal(shell.executed):
        shell.execute("import sys; sys.path.append('%s')" % tmp_dir)

    # It must show up in the contents delivered through the signal.
    with qtbot.waitSignal(shell.sig_show_syspath) as blocker:
        shell.request_syspath()
    assert tmp_dir in blocker.args[0]
@flaky(max_runs=10)
@pytest.mark.skipif(os.name == 'nt', reason="It doesn't work on Windows")
def test_save_history_dbg(ipyconsole, qtbot):
    """Test that browsing command history is working while debugging.

    Also checks that pdb commands and empty lines are excluded from the
    history, that the history is available from a second console, and that
    multiline entries are browsed correctly across continuation prompts.
    """
    shell = ipyconsole.get_current_shellwidget()

    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()

    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')

    # Enter an expression
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, 'aa = 10')
        qtbot.keyClick(control, Qt.Key_Enter)

    # Add a pdb command to make sure it is not saved
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!u')
        qtbot.keyClick(control, Qt.Key_Enter)

    # Add an empty line to make sure it is not saved
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)

    # Clear console (for some reason using shell.clear_console
    # doesn't work here)
    shell.reset(clear=True)
    qtbot.waitUntil(lambda: shell.is_waiting_pdb_input())

    # Make sure we are debugging
    assert shell.is_waiting_pdb_input()

    # Press Up arrow button and assert we get the last
    # introduced command
    qtbot.keyClick(control, Qt.Key_Up)
    assert 'aa = 10' in control.toPlainText()

    # Open new widget
    ipyconsole.create_new_client()
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()

    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')

    # Press Up arrow button and assert we get the last
    # introduced command (history is available in the new console too)
    qtbot.keyClick(control, Qt.Key_Up)
    assert 'aa = 10' in control.toPlainText()

    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)

    # Add a multiline statement and check we can browse it correctly
    shell._pdb_history.append('if True:\n print(1)')
    shell._pdb_history.append('print(2)')
    shell._pdb_history.append('if True:\n print(10)')
    shell._pdb_history_index = len(shell._pdb_history)
    # The continuation prompt is here
    qtbot.keyClick(control, Qt.Key_Up)
    assert '...: print(10)' in control.toPlainText()
    # Move the cursor into the first line of the multiline entry before
    # browsing further up the history.
    shell._control.set_cursor_position(shell._control.get_position('eof') - 25)
    qtbot.keyClick(control, Qt.Key_Up)
    assert '...: print(1)' in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 or IPython.version_info < (7, 17),
                    reason="insert is not the same in py2")
def test_dbg_input(ipyconsole, qtbot):
    """Test that spyder doesn't send pdb commands to unrelated input calls.

    While the debugged program is blocked on `input()`, text typed or sent
    via `pdb_execute` must not be interpreted as pdb commands.
    """
    shell = ipyconsole.get_current_shellwidget()

    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()

    # Debug with input
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print('Hello', input('name'))")

    # Reach the 'name' input
    shell.pdb_execute('!n')
    qtbot.wait(100)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'name')

    # Execute some code and make sure that it doesn't work
    # as this is not a pdb prompt (the 'name' prompt must still be the
    # last thing shown).
    shell.pdb_execute('!n')
    shell.pdb_execute('aa = 10')
    qtbot.wait(500)
    assert control.toPlainText().split()[-1] == 'name'

    # Answering the input through the kernel client must resume the print.
    shell.kernel_client.input('test')
    qtbot.waitUntil(lambda: 'Hello test' in control.toPlainText())
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It doesn't work on PY2")
def test_unicode_vars(ipyconsole, qtbot):
    """
    Test that the Variable Explorer works with unicode variable names.
    """
    shellwidget = ipyconsole.get_current_shellwidget()
    # Create a variable whose name is a non-ASCII identifier
    with qtbot.waitSignal(shellwidget.executed):
        shellwidget.execute('д = 10')
    # Reading the variable through the kernel must work
    assert shellwidget.get_value('д') == 10
    # Writing it back must work as well
    shellwidget.set_value('д', 20)
    qtbot.waitUntil(lambda: shellwidget.get_value('д') == 20)
    assert shellwidget.get_value('д') == 20
@flaky(max_runs=3)
def test_read_stderr(ipyconsole, qtbot):
    """
    Test the read operation of the stderr file of the kernel.
    """
    client = ipyconsole.get_current_client()
    # Set contents of the stderr file of the kernel. Use a context manager
    # so the file is guaranteed to be flushed and closed before reading it
    # back (the original relied on refcount GC to do that).
    content = 'Test text'
    stderr_file = client.stderr_obj.filename
    with codecs.open(stderr_file, 'w', 'cp437') as f:
        f.write(content)
    # Assert that content is correct
    assert content == client.stderr_obj.get_contents()
@flaky(max_runs=10)
@pytest.mark.no_xvfb
@pytest.mark.skipif(running_in_ci() and os.name == 'nt',
                    reason="Times out on Windows")
def test_values_dbg(ipyconsole, qtbot):
    """
    Test that getting, setting, copying and removing values is working while
    debugging.
    """
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Get value: define a variable at the pdb prompt and read it back
    with qtbot.waitSignal(shell.executed):
        shell.execute('aa = 10')
    assert 'aa = 10' in control.toPlainText()
    assert shell.get_value('aa') == 10
    # Set value
    shell.set_value('aa', 20)
    qtbot.waitUntil(lambda: shell.get_value('aa') == 20)
    assert shell.get_value('aa') == 20
    # Copy value
    shell.copy_value('aa', 'bb')
    qtbot.waitUntil(lambda: shell.get_value('bb') == 20)
    assert shell.get_value('bb') == 20
    # Remove value
    shell.remove_value('aa')
    def is_defined(val):
        # Helper: True if `val` exists in the kernel namespace
        try:
            shell.get_value(val)
            return True
        except KeyError:
            return False
    qtbot.waitUntil(lambda: not is_defined('aa'))
    with qtbot.waitSignal(shell.executed):
        shell.execute('aa')
    # Check the NameError message was received after the removal
    assert "*** NameError: name 'aa' is not defined" in control.toPlainText()
@flaky(max_runs=3)
def test_execute_events_dbg(ipyconsole, qtbot):
    """Test execute events while debugging"""
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Import Matplotlib
    with qtbot.waitSignal(shell.executed):
        shell.execute('import matplotlib.pyplot as plt')
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Set processing events to True
    ipyconsole.set_conf('pdb_execute_events', True)
    shell.set_pdb_execute_events(True)
    # Plot with events enabled: the figure should be drawn right away
    qtbot.keyClicks(control, 'plt.plot(range(10))')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Assert that there's a plot in the console
    assert shell._control.toHtml().count('img src') == 1
    # Set processing events to False
    ipyconsole.set_conf('pdb_execute_events', False)
    shell.set_pdb_execute_events(False)
    # Plot with events disabled: no figure should be drawn automatically
    qtbot.keyClicks(control, 'plt.plot(range(10))')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Assert that there's no new plots in the console
    assert shell._control.toHtml().count('img src') == 1
    # Test if the plot is shown with plt.show()
    qtbot.keyClicks(control, 'plt.show()')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Assert that there's a new plots in the console
    assert shell._control.toHtml().count('img src') == 2
@flaky(max_runs=3)
def test_run_doctest(ipyconsole, qtbot):
    """
    Test that doctests can be run without problems
    """
    shell = ipyconsole.get_current_shellwidget()
    # Function with doctests in its docstring, executed in the kernel
    code = dedent('''
    def add(x, y):
        """
        >>> add(1, 2)
        3
        >>> add(5.1, 2.2)
        7.3
        """
        return x + y
    ''')
    # Run code
    with qtbot.waitSignal(shell.executed):
        shell.execute(code)
    # Import doctest
    with qtbot.waitSignal(shell.executed):
        shell.execute('import doctest')
    # Run doctest
    with qtbot.waitSignal(shell.executed):
        shell.execute('doctest.testmod()')
    # Assert that doctests were run correctly
    assert "TestResults(failed=0, attempted=2)" in shell._control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or (PY2 and PYQT5),
                    reason="It times out frequently")
def test_mpl_backend_change(ipyconsole, qtbot):
    """
    Test that Matplotlib backend is changed correctly when
    using the %matplotlib magic
    """
    shell = ipyconsole.get_current_shellwidget()
    # Import Matplotlib
    with qtbot.waitSignal(shell.executed):
        shell.execute('import matplotlib.pyplot as plt')
    # Generate a plot with the default (inline) backend
    with qtbot.waitSignal(shell.executed):
        shell.execute('plt.plot(range(10))')
    # Change backends
    with qtbot.waitSignal(shell.executed):
        shell.execute('%matplotlib tk')
    # Generate another plot, now handled by the Tk backend
    with qtbot.waitSignal(shell.executed):
        shell.execute('plt.plot(range(10))')
    # Assert that only the first plot was rendered inline in the console
    assert shell._control.toHtml().count('img src') == 1
@flaky(max_runs=10)
@pytest.mark.skipif(running_in_ci(), reason="Fails frequently in CI")
def test_ctrl_c_dbg(ipyconsole, qtbot):
    """
    Test that Ctrl+C works while debugging
    """
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Test Ctrl+C: while debugging it shows a copy hint in the console
    qtbot.keyClick(control, Qt.Key_C, modifier=Qt.ControlModifier)
    qtbot.waitUntil(
        lambda: 'For copying text while debugging, use Ctrl+Shift+C' in
        control.toPlainText(), timeout=2000)
    assert 'For copying text while debugging, use Ctrl+Shift+C' in control.toPlainText()
@flaky(max_runs=10)
@pytest.mark.skipif(os.name == 'nt', reason="It doesn't work on Windows")
def test_clear_and_reset_magics_dbg(ipyconsole, qtbot):
    """
    Test that clear and reset magics are working while debugging
    """
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Test clear magic: only the next pdb prompt should remain
    shell.clear_console()
    qtbot.waitUntil(lambda: '\nIPdb [2]: ' == control.toPlainText())
    # Test reset magic: define a variable, reset, then check it's gone
    qtbot.keyClicks(control, 'bb = 10')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    assert shell.get_value('bb') == 10
    shell.reset_namespace()
    qtbot.wait(1000)
    qtbot.keyClicks(control, 'bb')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    assert "*** NameError: name 'bb' is not defined" in control.toPlainText()
@flaky(max_runs=3)
def test_restart_kernel(ipyconsole, mocker, qtbot):
    """
    Test that kernel is restarted correctly
    """
    # Mock method we want to check
    mocker.patch.object(ClientWidget, "_show_mpl_backend_errors")
    ipyconsole.create_new_client()
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Do an assignment to verify that it's not there after restarting
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')
    # Write something to stderr to verify that it's not there after restarting
    with qtbot.waitSignal(shell.executed):
        shell.execute('import sys; sys.__stderr__.write("HEL"+"LO")')
    qtbot.waitUntil(
        lambda: 'HELLO' in shell._control.toPlainText(), timeout=SHELL_TIMEOUT)
    # Restart kernel and wait until it's up again
    shell._prompt_html = None
    ipyconsole.restart_kernel()
    qtbot.waitUntil(
        lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    # After the restart the console is clean and the namespace is empty
    assert 'Restarting kernel...' in shell._control.toPlainText()
    assert 'HELLO' not in shell._control.toPlainText()
    assert not shell.is_defined('a')
    # Check that we try to show Matplotlib backend errors at the beginning and
    # after the restart.
    assert ClientWidget._show_mpl_backend_errors.call_count == 2
@flaky(max_runs=3)
def test_load_kernel_file_from_id(ipyconsole, qtbot):
    """
    Test that a new client can be attached to an existing kernel by
    passing only the kernel id.
    """
    client = ipyconsole.get_current_client()
    # Extract the kernel id from the 'kernel-<id>.json' file name
    fname = osp.basename(client.connection_file)
    kernel_id = fname.split('kernel-')[-1].split('.json')[0]
    ipyconsole.get_widget()._create_client_for_kernel(
        kernel_id, None, None, None)
    qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
    # The second client gets the expected id
    second_client = ipyconsole.get_clients()[-1]
    assert second_client.id_ == dict(int_id='1', str_id='B')
@flaky(max_runs=3)
def test_load_kernel_file_from_location(ipyconsole, qtbot, tmpdir):
    """
    Test that a new client is created from a connection file placed
    outside of jupyter_runtime_dir.
    """
    client = ipyconsole.get_current_client()
    # Copy the connection file to a temporary directory
    fname = osp.basename(client.connection_file)
    copied_file = to_text_string(tmpdir.join(fname))
    shutil.copy2(client.connection_file, copied_file)
    # Open a client using the copied file
    ipyconsole.get_widget()._create_client_for_kernel(
        copied_file, None, None, None)
    qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
    assert len(ipyconsole.get_clients()) == 2
@flaky(max_runs=3)
def test_load_kernel_file(ipyconsole, qtbot, tmpdir):
    """
    Test that a second client can share the kernel of an existing client
    through its connection file.
    """
    first_shell = ipyconsole.get_current_shellwidget()
    first_client = ipyconsole.get_current_client()
    # Open a new client attached to the same connection file
    ipyconsole.get_widget()._create_client_for_kernel(
        first_client.connection_file, None, None, None)
    qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
    second_client = ipyconsole.get_clients()[1]
    second_shell = second_client.shellwidget
    qtbot.waitUntil(lambda: second_shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Define a variable from the second client
    with qtbot.waitSignal(second_shell.executed):
        second_shell.execute('a = 10')
    # The new client has the expected id and both widgets see the same kernel
    assert second_client.id_ == dict(int_id='1', str_id='B')
    assert first_shell.get_value('a') == second_shell.get_value('a')
@flaky(max_runs=3)
def test_sys_argv_clear(ipyconsole, qtbot):
    """Check that the kernel starts with a pristine sys.argv."""
    shellwidget = ipyconsole.get_current_shellwidget()
    # Capture sys.argv inside the kernel
    with qtbot.waitSignal(shellwidget.executed):
        shellwidget.execute('import sys; A = sys.argv')
    # A fresh kernel must only contain the empty-string entry
    assert shellwidget.get_value("A") == ['']
@flaky(max_runs=5)
@pytest.mark.skipif(os.name == 'nt', reason="Fails sometimes on Windows")
def test_set_elapsed_time(ipyconsole, qtbot):
    """Test that the IPython console elapsed timer is set correctly."""
    client = ipyconsole.get_current_client()
    # Show time label.
    ipyconsole.get_widget().set_show_elapsed_time_current_client(True)
    # Set time to 2 minutes ago.
    client.t0 -= 120
    with qtbot.waitSignal(client.timer.timeout, timeout=5000):
        ipyconsole.get_widget().set_client_elapsed_time(client)
    # Allow one second of slack in case a tick happened in between
    assert ('00:02:00' in client.time_label.text() or
            '00:02:01' in client.time_label.text())
    # Wait for a second to pass, to ensure timer is counting up
    with qtbot.waitSignal(client.timer.timeout, timeout=5000):
        pass
    assert ('00:02:01' in client.time_label.text() or
            '00:02:02' in client.time_label.text())
    # Make previous time later than current time.
    client.t0 += 2000
    with qtbot.waitSignal(client.timer.timeout, timeout=5000):
        pass
    # A start time in the future must be shown as zero elapsed time
    assert '00:00:00' in client.time_label.text()
    client.timer.timeout.disconnect(client.show_time)
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on Windows")
def test_stderr_file_is_removed_one_kernel(ipyconsole, qtbot, monkeypatch):
    """Test that the console removes its stderr file on client close."""
    client = ipyconsole.get_current_client()
    # Auto-accept the confirmation dialog shown when closing the client
    monkeypatch.setattr(QMessageBox, 'question',
                        classmethod(lambda *args: QMessageBox.Yes))
    stderr_path = client.stderr_obj.filename
    # The stderr file exists while the client is alive
    assert osp.exists(stderr_path)
    # Closing the client must delete it
    ipyconsole.close_client(client=client)
    assert not osp.exists(stderr_path)
@flaky(max_runs=3)
@pytest.mark.skipif(
    not sys.platform.startswith('linux'),
    reason="Doesn't work on Windows and hangs sometimes on Mac")
def test_stderr_file_is_removed_two_kernels(ipyconsole, qtbot, monkeypatch):
    """Test that console removes stderr when client and related clients
    are closed."""
    client = ipyconsole.get_current_client()
    # New client with the same kernel
    ipyconsole.get_widget()._create_client_for_kernel(
        client.connection_file, None, None, None)
    assert len(ipyconsole.get_widget().get_related_clients(client)) == 1
    other_client = ipyconsole.get_widget().get_related_clients(client)[0]
    # Both clients share a single stderr file
    assert client.stderr_obj.filename == other_client.stderr_obj.filename
    # Auto-answer Yes to the close-related-clients dialog
    monkeypatch.setattr(QMessageBox, 'question',
                        classmethod(lambda *args: QMessageBox.Yes))
    assert osp.exists(client.stderr_obj.filename)
    # Closing all related clients must remove the shared file
    ipyconsole.close_client(client=client)
    assert not osp.exists(client.stderr_obj.filename)
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on Windows")
def test_stderr_file_remains_two_kernels(ipyconsole, qtbot, monkeypatch):
    """Test that console doesn't remove stderr when a related client is not
    closed."""
    client = ipyconsole.get_current_client()
    # New client with the same kernel
    ipyconsole.get_widget()._create_client_for_kernel(
        client.connection_file, None, None, None)
    assert len(ipyconsole.get_widget().get_related_clients(client)) == 1
    other_client = ipyconsole.get_widget().get_related_clients(client)[0]
    # Both clients share a single stderr file
    assert client.stderr_obj.filename == other_client.stderr_obj.filename
    # Auto-answer No, so the related client stays open
    monkeypatch.setattr(QMessageBox, "question",
                        classmethod(lambda *args: QMessageBox.No))
    assert osp.exists(client.stderr_obj.filename)
    # The file must remain because the other client still uses it
    ipyconsole.close_client(client=client)
    assert osp.exists(client.stderr_obj.filename)
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
                    reason="Fails sometimes on macOS")
def test_kernel_crash(ipyconsole, qtbot):
    """Test that we show an error message when a kernel crash occurs."""
    # Create an IPython kernel config file with a bad config
    ipy_kernel_cfg = osp.join(get_ipython_dir(), 'profile_default',
                              'ipython_kernel_config.py')
    with open(ipy_kernel_cfg, 'w') as f:
        # This option must be a string, not an int
        f.write("c.InteractiveShellApp.extra_extension = 1")
    ipyconsole.create_new_client()
    # Assert that the console is showing an error
    qtbot.waitUntil(lambda: ipyconsole.get_clients()[-1].is_error_shown,
                    timeout=6000)
    error_client = ipyconsole.get_clients()[-1]
    assert error_client.is_error_shown
    # Assert the error contains the text we expect
    webview = error_client.infowidget
    if WEBENGINE:
        webpage = webview.page()
    else:
        webpage = webview.page().mainFrame()
    qtbot.waitUntil(
        lambda: check_text(webpage, "Bad config encountered"),
        timeout=6000)
    # Remove bad kernel config file so later tests are unaffected
    os.remove(ipy_kernel_cfg)
@flaky(max_runs=3)
@pytest.mark.skipif(not os.name == 'nt', reason="Only necessary on Windows")
def test_remove_old_std_files(ipyconsole, qtbot):
    """Test that we are removing old std files."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Create empty std files in our temp dir to see if they are removed
    # correctly.
    tmpdir = get_temp_dir()
    open(osp.join(tmpdir, 'foo.stderr'), 'a').close()
    open(osp.join(tmpdir, 'foo.stdout'), 'a').close()
    # Assert that only old std files are removed
    ipyconsole._remove_old_std_files()
    assert not osp.isfile(osp.join(tmpdir, 'foo.stderr'))
    assert not osp.isfile(osp.join(tmpdir, 'foo.stdout'))
    # The current kernel std files should be present
    for fname in glob.glob(osp.join(tmpdir, '*')):
        assert osp.basename(fname).startswith('kernel')
        assert any(
            [osp.basename(fname).endswith(ext)
             for ext in ('.stderr', '.stdout', '.fault')]
        )
@flaky(max_runs=10)
@pytest.mark.use_startup_wdir
@pytest.mark.skipif(os.name == 'nt', reason="Too flaky on Windows")
def test_console_working_directory(ipyconsole, qtbot):
    """Check that the kernel starts in the configured working directory."""
    shellwidget = ipyconsole.get_current_shellwidget()
    # Ask the kernel for its current working directory
    with qtbot.waitSignal(shellwidget.executed):
        shellwidget.execute('import os; cwd = os.getcwd()')
    kernel_cwd = shellwidget.get_value('cwd')
    # The last path component must be the startup directory
    assert osp.split(kernel_cwd)[-1] == NEW_DIR
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux') or PY2,
                    reason="It only works on Linux with python 3.")
def test_console_complete(ipyconsole, qtbot, tmpdir):
    """Test code completions in the console."""
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    def check_value(name, value):
        # Helper: True once `name` holds `value` in the kernel
        try:
            return shell.get_value(name) == value
        except KeyError:
            return False
    # test complete with one result
    with qtbot.waitSignal(shell.executed):
        shell.execute('cbs = 1')
    qtbot.waitUntil(lambda: check_value('cbs', 1))
    qtbot.wait(500)
    qtbot.keyClicks(control, 'cb')
    qtbot.keyClick(control, Qt.Key_Tab)
    # Jedi completion takes time to start up the first time
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'cbs',
                    timeout=6000)
    # test complete with several result
    with qtbot.waitSignal(shell.executed):
        shell.execute('cbba = 1')
    qtbot.waitUntil(lambda: check_value('cbba', 1))
    qtbot.keyClicks(control, 'cb')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(shell._completion_widget.isVisible)
    # cbs is another solution, so not completed yet
    assert control.toPlainText().split()[-1] == 'cb'
    qtbot.keyClick(shell._completion_widget, Qt.Key_Enter)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'cbba')
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Test complete in debug mode
    # check abs is completed twice (as the cursor moves)
    qtbot.keyClicks(control, 'ab')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abs')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # A second time to check a function call doesn't cause a problem
    qtbot.keyClicks(control, 'print(ab')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(
        lambda: control.toPlainText().split()[-1] == 'print(abs')
    qtbot.keyClicks(control, ')')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Enter an expression
    qtbot.keyClicks(control, 'baab = 10')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(100)
    qtbot.waitUntil(lambda: check_value('baab', 10))
    # Check baab is completed
    qtbot.keyClicks(control, 'baa')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'baab')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Check the completion widget is shown for abba, abs
    qtbot.keyClicks(control, 'abba = 10')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(100)
    qtbot.waitUntil(lambda: check_value('abba', 10))
    qtbot.keyClicks(control, 'ab')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(shell._completion_widget.isVisible)
    assert control.toPlainText().split()[-1] == 'ab'
    qtbot.keyClick(shell._completion_widget, Qt.Key_Enter)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abba')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Create a class
    qtbot.keyClicks(control, 'class A(): baba = 1')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(100)
    qtbot.waitUntil(lambda: shell.is_defined('A'))
    qtbot.keyClicks(control, 'a = A()')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(100)
    qtbot.waitUntil(lambda: shell.is_defined('a'))
    # Check we can complete attributes
    qtbot.keyClicks(control, 'a.ba')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'a.baba')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Check we can complete pdb command names
    qtbot.keyClicks(control, '!longl')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == '!longlist')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Check we can use custom complete for pdb
    test_file = tmpdir.join('test.py')
    test_file.write('stuff\n')
    # Set a breakpoint in the new file
    qtbot.keyClicks(control, '!b ' + str(test_file) + ':1')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Check we can complete the breakpoint number
    qtbot.keyClicks(control, '!ignore ')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == '1')
@flaky(max_runs=10)
@pytest.mark.use_startup_wdir
def test_pdb_multiline(ipyconsole, qtbot):
    """Test entering a multiline statement into pdb"""
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    assert '\nIPdb [' in control.toPlainText()
    # Enter a multiline statement, line by line, ending with a blank line
    qtbot.keyClicks(control, 'if True:')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    qtbot.keyClicks(control, 'bb = 10')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    # The whole block must have been executed in the debugger namespace
    assert shell.get_value('bb') == 10
    assert "if True:\n ...: bb = 10\n" in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "show_lib", [True, False])
def test_pdb_ignore_lib(ipyconsole, qtbot, show_lib):
    """Test that the pdb_ignore_lib option hides library frames when set."""
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # pdb_ignore_lib is the inverse of "show library frames"
    ipyconsole.set_conf('pdb_ignore_lib', not show_lib)
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Step into print(): without ignore_lib this enters iostream.py
    qtbot.keyClicks(control, '!s')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    qtbot.keyClicks(control, '!q')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    if show_lib:
        assert 'iostream.py' in control.toPlainText()
    else:
        assert 'iostream.py' not in control.toPlainText()
    # Restore the default value
    ipyconsole.set_conf('pdb_ignore_lib', True)
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
                    reason="Times out on macOS")
def test_calltip(ipyconsole, qtbot):
    """
    Test that a calltip is shown when typing an open parenthesis.

    See spyder-ide/spyder#10842
    """
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Define a dict so 'a.keys(' has a signature to display
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = {"a": 1}')
    # Typing up to the open paren should pop up the calltip widget
    qtbot.keyClicks(control, 'a.keys(', delay=100)
    qtbot.wait(1000)
    assert control.calltip_widget.isVisible()
@flaky(max_runs=3)
@pytest.mark.order(1)
@pytest.mark.test_environment_interpreter
def test_conda_env_activation(ipyconsole, qtbot):
    """
    Test that the conda environment associated with an external interpreter
    is activated before a kernel is created for it.
    """
    # Wait until the window is fully up
    shell = ipyconsole.get_current_shellwidget()
    # Get conda activation environment variable
    with qtbot.waitSignal(shell.executed):
        shell.execute(
            "import os; conda_prefix = os.environ.get('CONDA_PREFIX')")
    # Normalize path separators so the comparison works on Windows too
    expected_output = get_conda_test_env().replace('\\', '/')
    # Only check when the test env is really a conda environment
    if is_conda_env(expected_output):
        output = shell.get_value('conda_prefix').replace('\\', '/')
        assert expected_output == output
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="no SIGTERM on Windows")
def test_kernel_kill(ipyconsole, qtbot):
    """
    Test that the kernel correctly restarts after a kill.
    """
    shell = ipyconsole.get_current_shellwidget()
    # Wait for the restarter to start
    qtbot.wait(3000)
    crash_string = 'import os, signal; os.kill(os.getpid(), signal.SIGTERM)'
    # Check only one comm is open
    old_open_comms = list(shell.spyder_kernel_comm._comms.keys())
    assert len(old_open_comms) == 1
    # Kill the kernel and wait for the automatic restart
    with qtbot.waitSignal(shell.sig_prompt_ready, timeout=30000):
        shell.execute(crash_string)
    assert crash_string in shell._control.toPlainText()
    assert "Restarting kernel..." in shell._control.toPlainText()
    # Check a new comm replaced the old one
    new_open_comms = list(shell.spyder_kernel_comm._comms.keys())
    assert len(new_open_comms) == 1
    assert old_open_comms[0] != new_open_comms[0]
    # Wait until the comm replies
    qtbot.waitUntil(
        lambda: shell.spyder_kernel_comm._comms[new_open_comms[0]][
            'status'] == 'ready')
    assert shell.spyder_kernel_comm._comms[new_open_comms[0]][
        'status'] == 'ready'
@flaky(max_runs=3)
@pytest.mark.parametrize("spyder_pythonpath", [True, False])
def test_wrong_std_module(ipyconsole, qtbot, tmpdir, spyder_pythonpath):
    """
    Test that a file with the same name of a standard library module in
    the current working directory doesn't break the console.
    """
    # Create an empty file called random.py, either on the extra
    # spyder_pythonpath entry or in the cwd
    if spyder_pythonpath:
        wrong_random_mod = tmpdir.join('random.py')
        wrong_random_mod.write('')
        wrong_random_mod = str(wrong_random_mod)
        ipyconsole.set_conf('spyder_pythonpath', [str(tmpdir)], section='main')
    else:
        wrong_random_mod = osp.join(os.getcwd(), 'random.py')
        with open(wrong_random_mod, 'w') as f:
            f.write('')
    # Create a new client to see if its kernel starts despite the
    # faulty module.
    ipyconsole.create_new_client()
    # A prompt should be created if the kernel didn't crash.
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Assert the extra path from spyder_pythonpath was added
    if spyder_pythonpath:
        check_sys_path = (
            "import sys; path_added = r'{}' in sys.path".format(str(tmpdir))
        )
        with qtbot.waitSignal(shell.sig_prompt_ready, timeout=30000):
            shell.execute(check_sys_path)
        assert shell.get_value('path_added')
    # Remove wrong module
    os.remove(wrong_random_mod)
    # Restore CONF
    ipyconsole.set_conf('spyder_pythonpath', [], section='main')
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="no SIGTERM on Windows")
def test_kernel_restart_after_manual_restart_and_crash(ipyconsole, qtbot):
    """
    Test that the kernel restarts correctly after being restarted
    manually and then it crashes.

    This is a regression test for spyder-ide/spyder#12972.
    """
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Restart kernel and wait until it's up again
    shell._prompt_html = None
    ipyconsole.restart_kernel()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Wait for the restarter to start
    qtbot.wait(3000)
    # Generate a crash
    crash_string = 'import os, signal; os.kill(os.getpid(), signal.SIGTERM)'
    with qtbot.waitSignal(shell.sig_prompt_ready, timeout=30000):
        shell.execute(crash_string)
    assert crash_string in shell._control.toPlainText()
    # Evaluate an expression to be sure the restart was successful
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')
    assert shell.is_defined('a')
    # Wait until the comm replies
    open_comms = list(shell.spyder_kernel_comm._comms.keys())
    qtbot.waitUntil(
        lambda: shell.spyder_kernel_comm._comms[open_comms[0]][
            'status'] == 'ready')
@flaky(max_runs=3)
def test_stderr_poll(ipyconsole, qtbot):
    """Test if the content of stderr is printed to the console."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    client = ipyconsole.get_current_client()
    # Append directly to the kernel's stderr file
    client.stderr_obj.handle.flush()
    with open(client.stderr_obj.filename, 'a') as f:
        f.write("test_test")
    # Wait for the poll
    qtbot.waitUntil(lambda: "test_test" in ipyconsole.get_widget(
        ).get_focus_widget().toPlainText())
    assert "test_test" in ipyconsole.get_widget(
        ).get_focus_widget().toPlainText()
    # Write a second time, makes sure it is not duplicated
    client.stderr_obj.handle.flush()
    with open(client.stderr_obj.filename, 'a') as f:
        f.write("\ntest_test")
    # Wait for the poll
    qtbot.waitUntil(lambda: ipyconsole.get_widget().get_focus_widget(
        ).toPlainText().count("test_test") == 2)
    assert ipyconsole.get_widget().get_focus_widget().toPlainText(
        ).count("test_test") == 2
@flaky(max_runs=3)
def test_stdout_poll(ipyconsole, qtbot):
    """Test that content appended to the stdout file reaches the console."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    client = ipyconsole.get_current_client()
    # Append a marker string to the kernel's stdout file
    client.stdout_obj.handle.flush()
    with open(client.stdout_obj.filename, 'a') as stdout_file:
        stdout_file.write("test_test")

    def focus_text():
        # Current text of the console focus widget
        return ipyconsole.get_widget().get_focus_widget().toPlainText()

    # The poller should pick up the marker and echo it in the console
    qtbot.waitUntil(lambda: "test_test" in focus_text(), timeout=5000)
    assert "test_test" in focus_text()
@flaky(max_runs=10)
@pytest.mark.use_startup_wdir
def test_startup_code_pdb(ipyconsole, qtbot):
    """Test that startup code for pdb works."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Configure a line to run when the debugger starts
    ipyconsole.set_conf(
        'startup/pdb_run_lines',
        'abba = 12; print("Hello")'
    )
    shell.execute('%debug print()')
    qtbot.waitUntil(lambda: 'Hello' in control.toPlainText())
    # Verify that the line was executed
    assert shell.get_value('abba') == 12
    # Reset setting
    ipyconsole.set_conf('startup/pdb_run_lines', '')
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "backend",
    ['inline', 'qt5', 'tk', 'osx']
)
def test_pdb_eventloop(ipyconsole, qtbot, backend):
    """Check if setting an event loop while debugging works."""
    # Skip backend/platform combinations that are known to fail
    if backend == 'tk' and os.name == 'nt':
        return
    if backend == 'osx' and sys.platform != "darwin":
        return
    # Idiom fix: `os.name != "nt"` instead of `not os.name == "nt"`
    if backend == 'qt5' and os.name != "nt" and running_in_ci():
        return
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    # Activate the event loop, enter the debugger and check the kernel
    # still evaluates code while the loop is running
    with qtbot.waitSignal(shell.executed):
        shell.execute("%matplotlib " + backend)
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print()")
    with qtbot.waitSignal(shell.executed):
        shell.execute("print('Two: ' + str(1+1))")
    assert "Two: 2" in control.toPlainText()
@flaky(max_runs=3)
def test_recursive_pdb(ipyconsole, qtbot):
    """Test that we can enter and leave recursive debuggers with !debug."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print()")
    # Define a variable to exercise completion later on
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("abab = 10")
    # Check that we can't use magic twice
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("%debug print()")
    assert "Please don't use '%debug'" in control.toPlainText()
    # Check we can enter the recursive debugger twice: the prompt gains
    # one level of parentheses per recursion
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!debug print()")
    assert "(IPdb [1]):" in control.toPlainText()
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!debug print()")
    assert "((IPdb [1])):" in control.toPlainText()
    # quit one layer
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!quit")
    assert control.toPlainText().split()[-2:] == ["(IPdb", "[2]):"]
    # Check completion works in the recursive debugger
    qtbot.keyClicks(control, 'aba')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abab',
                    timeout=SHELL_TIMEOUT)
    # quit one layer
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!quit")
    assert control.toPlainText().split()[-2:] == ["IPdb", "[4]:"]
    # Check completion works in the outer debugger too
    qtbot.keyClicks(control, 'aba')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abab',
                    timeout=SHELL_TIMEOUT)
    # Quit the last debugger and check we are back at a normal prompt
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!quit")
    with qtbot.waitSignal(shell.executed):
        shell.execute("1 + 1")
    assert control.toPlainText().split()[-2:] == ["In", "[3]:"]
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on windows")
def test_stop_pdb(ipyconsole, qtbot):
    """Test if we can stop pdb"""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    stop_button = ipyconsole.get_widget().stop_button
    # Enter pdb
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print()")
    # Start and interrupt a long execution
    shell.execute("import time; time.sleep(10)")
    qtbot.wait(500)
    with qtbot.waitSignal(shell.executed, timeout=1000):
        qtbot.mouseClick(stop_button, Qt.LeftButton)
    # The first click only interrupts the running code
    assert "KeyboardInterrupt" in control.toPlainText()
    # We are still in the debugger
    assert "IPdb [2]:" in control.toPlainText()
    assert "In [2]:" not in control.toPlainText()
    # Leave the debugger: a second click exits pdb entirely
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(stop_button, Qt.LeftButton)
    assert "In [2]:" in control.toPlainText()
@flaky(max_runs=3)
# BUG FIX: the original used `sys.platform == 'nt'`, which is never true
# (sys.platform is 'win32' on Windows), so the skip never applied. Use
# os.name like the other skipif markers in this file.
@pytest.mark.skipif(os.name == 'nt', reason="Times out on Windows")
def test_code_cache(ipyconsole, qtbot):
    """
    Test that code sent to execute is properly cached
    and that the cache is emptied on interrupt.
    """
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()

    def check_value(name, value):
        # Poll helper: the kernel may not have defined `name` yet
        try:
            return shell.get_value(name) == value
        except KeyError:
            return False

    # Send two execute requests and make sure the second one is executed
    shell.execute('import time; time.sleep(.5)')
    shell.execute('var = 142')
    qtbot.wait(500)
    qtbot.waitUntil(lambda: check_value('var', 142))
    assert shell.get_value('var') == 142
    # Send two execute requests and cancel the second one
    shell.execute('import time; time.sleep(.5)')
    shell.execute('var = 1000')
    shell.interrupt_kernel()
    qtbot.wait(1000)
    # Make sure the value of var didn't change
    assert shell.get_value('var') == 142
    # Same for debugging
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    assert 'IPdb [' in shell._control.toPlainText()
    # Send two execute requests and make sure the second one is executed
    shell.execute('time.sleep(.5)')
    shell.execute('var = 318')
    qtbot.wait(500)
    qtbot.waitUntil(lambda: check_value('var', 318))
    assert shell.get_value('var') == 318
    # Send two execute requests and cancel the second one
    shell.execute('import time; time.sleep(.5)')
    shell.execute('var = 1000')
    shell.interrupt_kernel()
    qtbot.wait(1000)
    # Make sure the value of var didn't change
    assert shell.get_value('var') == 318
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="Doesn't work on Python 2.7")
def test_pdb_code_and_cmd_separation(ipyconsole, qtbot):
    """Check commands and code are separated."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print()")
    assert "Error" not in control.toPlainText()
    # Evaluating an undefined name reports a NameError
    with qtbot.waitSignal(shell.executed):
        shell.execute("e")
    assert "name 'e' is not defined" in control.toPlainText()
    # A '!' prefix forces the input to be treated as a pdb command
    with qtbot.waitSignal(shell.executed):
        shell.execute("!n")
    assert "--Return--" in control.toPlainText()
    with qtbot.waitSignal(shell.executed):
        shell.execute("a")
    assert ("*** NameError: name 'a' is not defined"
            not in control.toPlainText())
    with qtbot.waitSignal(shell.executed):
        shell.execute("abba")
    assert "name 'abba' is not defined" in control.toPlainText()
    # Unknown pdb commands are reported as such
    with qtbot.waitSignal(shell.executed):
        shell.execute("!abba")
    assert "Unknown command 'abba'" in control.toPlainText()
@flaky(max_runs=3)
def test_breakpoint_builtin(ipyconsole, qtbot, tmpdir):
    """Check that the breakpoint builtin is working."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    # Code to run (note: breakpoint() is a Python 3.7+ builtin)
    code = dedent("""
        print('foo')
        breakpoint()
    """)
    # Write code to file on disk
    file = tmpdir.join('test_breakpoint.py')
    file.write(code)
    # Run file
    with qtbot.waitSignal(shell.executed):
        shell.execute(f"runfile(filename=r'{str(file)}')")
    # Assert we entered debugging after the print statement
    qtbot.wait(5000)
    assert 'foo' in control.toPlainText()
    assert 'IPdb [1]:' in control.toPlainText()
def test_pdb_out(ipyconsole, qtbot):
    """Test that Out values are shown in pdb unless suppressed with ';'."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Generate some output
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('a = 12 + 1; a')
    assert "[1]: 13" in control.toPlainText()
    # A trailing semicolon suppresses the Out display
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('a = 14 + 1; a;')
    assert "[2]: 15" not in control.toPlainText()
    # Multiline
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('a = 16 + 1\na')
    assert "[3]: 17" in control.toPlainText()
    # Multiline with suppressed output
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('a = 18 + 1\na;')
    assert "[4]: 19" not in control.toPlainText()
    assert "IPdb [4]:" in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.auto_backend
@pytest.mark.skipif(
    running_in_ci() and not os.name == 'nt',
    reason="Times out on Linux and macOS")
def test_shutdown_kernel(ipyconsole, qtbot):
    """
    Check that the kernel is shutdown after creating plots with the
    automatic backend.
    This is a regression test for issue spyder-ide/spyder#17011
    """
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Create a Matplotlib plot
    with qtbot.waitSignal(shell.executed):
        shell.execute("import matplotlib.pyplot as plt; plt.plot(range(10))")
    # Get kernel pid
    with qtbot.waitSignal(shell.executed):
        shell.execute("import os; pid = os.getpid()")
    kernel_pid = shell.get_value('pid')
    # Close current tab
    ipyconsole.get_widget().close_client()
    # Wait until new client is created and previous kernel is shutdown
    qtbot.wait(5000)
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Detect if previous kernel was killed (checked from the new kernel
    # via psutil so the test is process-table based, not handle based)
    with qtbot.waitSignal(shell.executed):
        shell.execute(
            f"import psutil; kernel_exists = psutil.pid_exists({kernel_pid})"
        )
    assert not shell.get_value('kernel_exists')
def test_pdb_comprehension_namespace(ipyconsole, qtbot, tmpdir):
    """Check that the debugger handles the namespace of a comprehension."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    # Code to run: `locals` deliberately shadows the builtin inside a
    # comprehension, a hard case for the debugger's frame handling
    code = "locals = 1\nx = [locals + i for i in range(2)]"
    # Write code to file on disk
    file = tmpdir.join('test_breakpoint.py')
    file.write(code)
    # Run file
    with qtbot.waitSignal(shell.executed):
        shell.execute(f"debugfile(filename=r'{str(file)}')")
    # steps 4 times
    for i in range(4):
        with qtbot.waitSignal(shell.executed):
            shell.pdb_execute("s")
        assert "Error" not in control.toPlainText()
    # Both the outer variable and the comprehension index must be visible
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("print('test', locals + i + 10)")
    assert "Error" not in control.toPlainText()
    assert "test 11" in control.toPlainText()
    # Namespace-view settings for the variable explorer request below
    settings = {
        'check_all': False,
        'exclude_callables_and_modules': True,
        'exclude_capitalized': False,
        'exclude_private': True,
        'exclude_unsupported': False,
        'exclude_uppercase': True,
        'excluded_names': [],
        'minmax': False,
        'show_callable_attributes': True,
        'show_special_attributes': False}
    shell.call_kernel(
        interrupt=True
    ).set_namespace_view_settings(settings)
    namespace = shell.call_kernel(blocking=True).get_namespace_view()
    # Internal pdb helper variables must not leak into the namespace view
    for key in namespace:
        assert "_spyderpdb" not in key
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    pytest.main()
| 34.743103 | 88 | 0.676777 |
import codecs
import glob
import os
import os.path as osp
import psutil
import shutil
import sys
import tempfile
from textwrap import dedent
import threading
import traceback
from unittest.mock import Mock
import IPython
from IPython.core import release as ipy_release
from IPython.core.application import get_ipython_dir
from flaky import flaky
from pkg_resources import parse_version
from pygments.token import Name
import pytest
from qtpy import PYQT5
from qtpy.QtCore import Qt
from qtpy.QtWebEngineWidgets import WEBENGINE
from qtpy.QtWidgets import QMessageBox, QMainWindow
import sympy
from spyder.config.base import get_home_dir, running_in_ci
from spyder.config.gui import get_color_scheme
from spyder.config.manager import ConfigurationManager
from spyder.py3compat import PY2, to_text_string
from spyder.plugins.help.tests.test_plugin import check_text
from spyder.plugins.help.utils.sphinxify import CSS_PATH
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
from spyder.plugins.ipythonconsole.utils.style import create_style_class
from spyder.plugins.ipythonconsole.widgets import ClientWidget
from spyder.utils.programs import get_temp_dir
from spyder.utils.conda import is_conda_env
# Maximum time (in ms) to wait for a console prompt to appear.
SHELL_TIMEOUT = 20000
TEMP_DIRECTORY = tempfile.gettempdir()
# Directory with non-ASCII characters, used by the `non_ascii_dir` marker.
NON_ASCII_DIR = osp.join(TEMP_DIRECTORY, u'測試', u'اختبار')
# Name of the working directory created by the `use_startup_wdir` marker.
NEW_DIR = 'new_workingdir'
def get_console_font_color(syntax_style):
    """Return the color the given syntax style assigns to plain names."""
    # The generated Pygments style class maps token types to color strings.
    style_class = create_style_class(syntax_style)
    return style_class.styles[Name]
def get_console_background_color(style_sheet):
    """Extract the value of the first background-color declaration."""
    # Take everything after the first marker, then cut at the semicolon
    # that closes the declaration.
    after_marker = style_sheet.split('background-color:')[1]
    return after_marker.split(';')[0]
def get_conda_test_env(test_env_name=u'spytest-ž'):
if 'envs' in sys.prefix:
root_prefix = os.path.dirname(os.path.dirname(sys.prefix))
else:
root_prefix = sys.prefix
test_env_prefix = os.path.join(root_prefix, 'envs', test_env_name)
if os.name == 'nt':
test_env_executable = os.path.join(test_env_prefix, 'python.exe')
else:
test_env_executable = os.path.join(test_env_prefix, 'bin', 'python')
return test_env_executable
@pytest.fixture
def ipyconsole(qtbot, request, tmpdir):
    """
    Create an IPython console plugin attached to a mocked main window,
    configured from the markers set on the requesting test, and check for
    leaked threads/open files/subprocesses on teardown.
    """
    configuration = ConfigurationManager(conf_path=str(tmpdir))
    # Minimal stand-in for Spyder's main window: any attribute the plugin
    # asks for (other than the special-cased ones) returns a Mock.
    class MainWindowMock(QMainWindow):
        def get_spyder_pythonpath(self):
            return configuration.get('main', 'spyder_pythonpath', [])
        def __getattr__(self, attr):
            if attr == 'consoles_menu_actions':
                return []
            elif attr == 'editor':
                return None
            else:
                return Mock()
    # Default to the inline Matplotlib backend unless a marker overrides it.
    configuration.set('ipython_console', 'pylab/backend', 0)
    use_startup_wdir = request.node.get_closest_marker('use_startup_wdir')
    if use_startup_wdir:
        new_wdir = osp.join(os.getcwd(), NEW_DIR)
        if not osp.exists(new_wdir):
            os.mkdir(new_wdir)
        configuration.set('workingdir', 'console/use_fixed_directory', True)
        configuration.set('workingdir', 'console/fixed_directory', new_wdir)
    else:
        configuration.set('workingdir', 'console/use_fixed_directory', False)
        configuration.set(
            'workingdir', 'console/fixed_directory', get_home_dir())
    non_ascii_dir = request.node.get_closest_marker('non_ascii_dir')
    if non_ascii_dir:
        test_dir = NON_ASCII_DIR
    else:
        test_dir = ''
    no_stderr_file = request.node.get_closest_marker('no_stderr_file')
    if no_stderr_file:
        test_no_stderr = 'True'
    else:
        test_no_stderr = ''
    auto_backend = request.node.get_closest_marker('auto_backend')
    if auto_backend:
        configuration.set('ipython_console', 'pylab/backend', 1)
    tk_backend = request.node.get_closest_marker('tk_backend')
    if tk_backend:
        configuration.set('ipython_console', 'pylab/backend', 8)
    # Special client kinds requested through markers.
    pylab_client = request.node.get_closest_marker('pylab_client')
    is_pylab = True if pylab_client else False
    sympy_client = request.node.get_closest_marker('sympy_client')
    is_sympy = True if sympy_client else False
    cython_client = request.node.get_closest_marker('cython_client')
    is_cython = True if cython_client else False
    external_interpreter = request.node.get_closest_marker(
        'external_interpreter')
    if external_interpreter:
        configuration.set('main_interpreter', 'default', False)
        configuration.set('main_interpreter', 'executable', sys.executable)
    else:
        configuration.set('main_interpreter', 'default', True)
        configuration.set('main_interpreter', 'executable', '')
    test_environment_interpreter = request.node.get_closest_marker(
        'test_environment_interpreter')
    if test_environment_interpreter:
        configuration.set('main_interpreter', 'default', False)
        configuration.set(
            'main_interpreter', 'executable', get_conda_test_env())
    else:
        configuration.set('main_interpreter', 'default', True)
        configuration.set('main_interpreter', 'executable', '')
    configuration.set('appearance', 'css_path', CSS_PATH)
    # Environment flags read by the console machinery under test.
    os.environ['IPYCONSOLE_TESTING'] = 'True'
    os.environ['IPYCONSOLE_TEST_DIR'] = test_dir
    os.environ['IPYCONSOLE_TEST_NO_STDERR'] = test_no_stderr
    window = MainWindowMock()
    console = IPythonConsole(parent=window, configuration=configuration)
    console._register()
    console.create_new_client(is_pylab=is_pylab,
                              is_sympy=is_sympy,
                              is_cython=is_cython)
    window.setCentralWidget(console.get_widget())
    configuration.set('ipython_console', 'pdb_use_exclamation_mark', True)
    if not sys.platform == "darwin":
        qtbot.addWidget(window)
        window.resize(640, 480)
        window.show()
    # Wait for the first console prompt before handing control to the test.
    shell = console.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Snapshot threads/open files/subprocesses so teardown can detect leaks.
    known_leak = request.node.get_closest_marker('known_leak')
    if os.name != 'nt' and not known_leak:
        init_threads = [
            repr(thread) for thread in threading.enumerate()
            if not isinstance(thread, threading._DummyThread)]
        proc = psutil.Process()
        init_files = [repr(f) for f in proc.open_files()]
        init_subprocesses = [repr(f) for f in proc.children()]
    yield console
    # NOTE(review): rep_setup/rep_call presumably come from a conftest
    # makereport hook -- confirm against the project's conftest.py.
    if request.node.rep_setup.passed:
        if request.node.rep_call.failed:
            # Print the console contents to help diagnose the failure.
            print(console.get_current_shellwidget(
                )._control.toPlainText())
            client = console.get_current_client()
            if client.info_page != client.blank_page:
                print('info_page')
                print(client.info_page)
    console.on_close()
    window.close()
    os.environ.pop('IPYCONSOLE_TESTING')
    os.environ.pop('IPYCONSOLE_TEST_DIR')
    os.environ.pop('IPYCONSOLE_TEST_NO_STDERR')
    # Leak checks are skipped on Windows and for tests with known leaks.
    if os.name == 'nt' or known_leak:
        return
    def show_diff(init_list, now_list, name):
        # Report items that differ from the snapshot taken before the test.
        sys.stderr.write(f"Extra {name} before test:\n")
        for item in init_list:
            if item in now_list:
                now_list.remove(item)
            else:
                sys.stderr.write(item + "\n")
        sys.stderr.write(f"Extra {name} after test:\n")
        for item in now_list:
            sys.stderr.write(item + "\n")
    try:
        # Wait until no extra (non-dummy) threads remain.
        def threads_condition():
            threads = [
                thread for thread in threading.enumerate()
                if not isinstance(thread, threading._DummyThread)]
            return (len(init_threads) >= len(threads))
        qtbot.waitUntil(threads_condition, timeout=SHELL_TIMEOUT)
    except Exception:
        now_threads = [
            thread for thread in threading.enumerate()
            if not isinstance(thread, threading._DummyThread)]
        threads = [repr(t) for t in now_threads]
        show_diff(init_threads, threads, "thread")
        sys.stderr.write("Running Threads stacks:\n")
        now_thread_ids = [t.ident for t in now_threads]
        for threadId, frame in sys._current_frames().items():
            if threadId in now_thread_ids:
                sys.stderr.write("\nThread " + str(threads) + ":\n")
                traceback.print_stack(frame)
        raise
    try:
        # Allow one fewer subprocess than at start (the closed kernel).
        qtbot.waitUntil(lambda: (
            len(init_subprocesses) - 1 >= len(proc.children())),
            timeout=SHELL_TIMEOUT)
    except Exception:
        subprocesses = [repr(f) for f in proc.children()]
        show_diff(init_subprocesses, subprocesses, "processes")
        raise
    try:
        qtbot.waitUntil(
            lambda: (len(init_files) >= len(proc.open_files())),
            timeout=SHELL_TIMEOUT)
    except Exception:
        files = [repr(f) for f in proc.open_files()]
        show_diff(init_files, files, "files")
        raise
@flaky(max_runs=3)
@pytest.mark.external_interpreter
def test_banners(ipyconsole, qtbot):
    """Test that console banners show the expected Python/IPython versions."""
    shell = ipyconsole.get_current_shellwidget()
    control = shell._control
    text = control.toPlainText().splitlines()
    # Skip a possible translation warning and leading blank lines
    if "Update LANGUAGE_CODES" in text[0]:
        text = text[1:]
    while not text[0].strip():
        text = text[1:]
    # Long banner: Python version, license line, blank, IPython version
    py_ver = sys.version.splitlines()[0].strip()
    assert py_ver in text[0]
    assert 'license' in text[1]
    assert '' == text[2]
    assert ipy_release.version in text[3]
    # Short banner
    short_banner = shell.short_banner()
    py_ver = sys.version.split(' ')[0]
    expected = 'Python %s -- IPython %s' % (py_ver, ipy_release.version)
    assert expected == short_banner
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "function,signature,documentation",
    [("arange",
      ["start", "stop"],
      ["Return evenly spaced values within a given interval.<br>",
       "<br>Python built-in `range` function, but returns an ndarray ..."]),
     ("vectorize",
      ["pyfunc", "otype", "signature"],
      ["Generalized function class.<br>",
       "Define a vectorized function which takes a nested sequence ..."]),
     ("absolute",
      ["x", "/", "out"],
      ["Parameters<br>", "x : array_like ..."])]
)
@pytest.mark.skipif(not os.name == 'nt',
                    reason="Times out on macOS and fails on Linux")
def test_get_calltips(ipyconsole, qtbot, function, signature, documentation):
    """Test that calltips show signature and docs for numpy functions."""
    shell = ipyconsole.get_current_shellwidget()
    control = shell._control
    with qtbot.waitSignal(shell.executed):
        shell.execute('import numpy as np')
    # Typing the opening parenthesis triggers the calltip request
    with qtbot.waitSignal(shell.kernel_client.shell_channel.message_received):
        qtbot.keyClicks(control, 'np.' + function + '(')
    qtbot.waitUntil(lambda: control.calltip_widget.isVisible())
    assert control.calltip_widget.isVisible()
    control.calltip_widget.hide()
    # The calltip must contain the expected signature and documentation
    for element in signature:
        assert element in control.calltip_widget.text()
    for element in documentation:
        assert element in control.calltip_widget.text()
@flaky(max_runs=3)
@pytest.mark.auto_backend
@pytest.mark.skipif(
    running_in_ci() and not os.name == 'nt',
    reason="Times out on Linux and macOS")
def test_auto_backend(ipyconsole, qtbot):
    """Test that the automatic Matplotlib backend selects the Qt5 loop."""
    shell = ipyconsole.get_current_shellwidget()
    with qtbot.waitSignal(shell.executed):
        shell.execute("ip = get_ipython(); ip.kernel.eventloop")
    control = ipyconsole.get_widget().get_focus_widget()
    assert 'NOTE' not in control.toPlainText()
    assert 'Error' not in control.toPlainText()
    # The kernel event loop must be the Qt5 one
    assert 'loop_qt5' in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.tk_backend
@pytest.mark.skipif(
    running_in_ci() and not os.name == 'nt',
    reason="Times out on Linux and macOS")
def test_tk_backend(ipyconsole, qtbot):
    """Test that the Tk Matplotlib backend selects the Tk event loop."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute("ip = get_ipython(); ip.kernel.eventloop")
    control = ipyconsole.get_widget().get_focus_widget()
    # The kernel event loop must be the Tk one
    assert 'loop_tk' in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.pylab_client
def test_pylab_client(ipyconsole, qtbot):
    """Test that pylab names are available and survive a namespace reset."""
    shell = ipyconsole.get_current_shellwidget()
    # `e` must already be defined by the pylab startup, so no error
    with qtbot.waitSignal(shell.executed):
        shell.execute("e")
    control = ipyconsole.get_widget().get_focus_widget()
    assert 'Error' not in control.toPlainText()
    # It must still be defined after resetting the namespace
    shell.reset_namespace()
    qtbot.wait(1000)
    with qtbot.waitSignal(shell.executed):
        shell.execute("e")
    control = ipyconsole.get_widget().get_focus_widget()
    assert 'Error' not in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.sympy_client
# BUG FIX: the original compared version strings lexicographically
# ('1.0' < sympy.__version__ < '1.2'), which misclassifies versions such
# as '1.10'. Compare parsed versions instead (parse_version is already
# imported at the top of this file).
@pytest.mark.xfail(
    parse_version('1.0') < parse_version(sympy.__version__) <
    parse_version('1.2'),
    reason="A bug with sympy 1.1.1 and IPython-Qtconsole")
def test_sympy_client(ipyconsole, qtbot):
    """Test that sympy names are available and survive a namespace reset."""
    shell = ipyconsole.get_current_shellwidget()
    # `x` must already be defined by the sympy startup, so no NameError
    with qtbot.waitSignal(shell.executed):
        shell.execute("x")
    control = ipyconsole.get_widget().get_focus_widget()
    assert 'NameError' not in control.toPlainText()
    # It must still be defined after resetting the namespace
    shell.reset_namespace()
    qtbot.wait(1000)
    with qtbot.waitSignal(shell.executed):
        shell.execute("x")
    control = ipyconsole.get_widget().get_focus_widget()
    assert 'NameError' not in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.cython_client
@pytest.mark.skipif(
    (not sys.platform.startswith('linux') or
     parse_version(ipy_release.version) == parse_version('7.11.0')),
    reason="It only works reliably on Linux and fails for IPython 7.11.0")
def test_cython_client(ipyconsole, qtbot):
    """Test that %%cython cells work and survive a namespace reset."""
    shell = ipyconsole.get_current_shellwidget()
    # Compiling a Cython cell must not raise an error
    with qtbot.waitSignal(shell.executed, timeout=SHELL_TIMEOUT):
        shell.execute("%%cython\n"
                      "cdef int ctest(int x, int y):\n"
                      "    return x + y")
    control = ipyconsole.get_widget().get_focus_widget()
    assert 'Error' not in control.toPlainText()
    # The magic must keep working after resetting the namespace
    shell.reset_namespace()
    qtbot.wait(1000)
    with qtbot.waitSignal(shell.executed, timeout=SHELL_TIMEOUT):
        shell.execute("%%cython\n"
                      "cdef int ctest(int x, int y):\n"
                      "    return x + y")
    control = ipyconsole.get_widget().get_focus_widget()
    assert 'Error' not in control.toPlainText()
@flaky(max_runs=3)
def test_tab_rename_for_slaves(ipyconsole, qtbot):
    """Test that slave clients attached to the same kernel are renamed too."""
    # Attach a second (slave) client to the current kernel
    cf = ipyconsole.get_current_client().connection_file
    ipyconsole.get_widget()._create_client_for_kernel(cf, None, None, None)
    qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
    # Renaming must affect both the master and the slave client
    ipyconsole.get_widget().rename_tabs_after_change('foo')
    assert 'foo' in ipyconsole.get_clients()[0].get_name()
    assert 'foo' in ipyconsole.get_clients()[1].get_name()
@flaky(max_runs=3)
def test_no_repeated_tabs_name(ipyconsole, qtbot):
    """Test that two clients renamed to the same name get distinct tabs."""
    ipyconsole.get_widget().rename_tabs_after_change('foo')
    ipyconsole.create_new_client()
    ipyconsole.get_widget().rename_tabs_after_change('foo')
    # The second client must get a numbered suffix
    client_name = ipyconsole.get_current_client().get_name()
    assert '2' in client_name
@flaky(max_runs=3)
@pytest.mark.skipif(
    running_in_ci() and sys.platform == 'darwin',
    reason="Hangs sometimes on macOS")
def test_tabs_preserve_name_after_move(ipyconsole, qtbot):
    """Test that client names follow their tabs when tabs are reordered."""
    # Create a new client
    ipyconsole.create_new_client()
    # Move tabs
    ipyconsole.get_widget().tabwidget.tabBar().moveTab(0, 1)
    # Assert the second client is in the first position
    client_name = ipyconsole.get_clients()[0].get_name()
    assert '2' in client_name
@flaky(max_runs=3)
def test_conf_env_vars(ipyconsole, qtbot):
    """Test that config-derived env vars are set in the kernel."""
    # Wait until the window is fully up
    shell = ipyconsole.get_current_shellwidget()
    # Get a CONF env var (SPY_SYMPY_O -- presumably derived from the
    # sympy console option; confirm against the kernel spec code)
    with qtbot.waitSignal(shell.executed):
        shell.execute("import os; a = os.environ.get('SPY_SYMPY_O')")
    # Assert we get the assigned value correctly
    assert shell.get_value('a') == 'False'
@flaky(max_runs=3)
@pytest.mark.no_stderr_file
def test_no_stderr_file(ipyconsole, qtbot):
    """Test that consoles work when no stderr file is used for the kernel."""
    # Wait until the window is fully up
    shell = ipyconsole.get_current_shellwidget()
    # Execute a simple assignment
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 1')
    # Assert we get the assigned value correctly
    assert shell.get_value('a') == 1
@pytest.mark.non_ascii_dir
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows")
def test_non_ascii_stderr_file(ipyconsole, qtbot):
    """Test that consoles work with a stderr file in a non-ASCII directory."""
    # Wait until the window is fully up
    shell = ipyconsole.get_current_shellwidget()
    # Execute a simple assignment
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 1')
    # Assert we get the assigned value
    assert shell.get_value('a') == 1
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 and sys.platform == 'darwin',
                    reason="It hangs frequently on Python 2.7 and macOS")
def test_console_import_namespace(ipyconsole, qtbot):
    """Test that star imports populate the kernel namespace."""
    # Wait until the window is fully up
    shell = ipyconsole.get_current_shellwidget()
    # Import numpy
    with qtbot.waitSignal(shell.executed):
        shell.execute('from numpy import *')
    # Assert we get the e value correctly
    assert shell.get_value('e') == 2.718281828459045
@flaky(max_runs=3)
def test_console_disambiguation(ipyconsole, qtbot):
    """Test tab-name disambiguation for clients of same-named files."""
    # Create directories and file for TEMP_DIRECTORY/a/b/c.py
    # and TEMP_DIRECTORY/a/d/c.py
    dir_b = osp.join(TEMP_DIRECTORY, 'a', 'b')
    filename_b = osp.join(dir_b, 'c.py')
    if not osp.isdir(dir_b):
        os.makedirs(dir_b)
    if not osp.isfile(filename_b):
        file_c = open(filename_b, 'w+')
        file_c.close()
    dir_d = osp.join(TEMP_DIRECTORY, 'a', 'd')
    filename_d = osp.join(dir_d, 'c.py')
    if not osp.isdir(dir_d):
        os.makedirs(dir_d)
    if not osp.isfile(filename_d):
        file_e = open(filename_d, 'w+')
        file_e.close()
    # Create new client and assert name without disambiguation
    ipyconsole.create_client_for_file(filename_b)
    client = ipyconsole.get_current_client()
    assert client.get_name() == 'c.py/A'
    # Create new client and assert name with disambiguation
    ipyconsole.create_client_for_file(filename_d)
    client = ipyconsole.get_current_client()
    assert client.get_name() == 'c.py - d/A'
    # The first client must have been renamed with its directory too
    ipyconsole.get_widget().tabwidget.setCurrentIndex(1)
    client = ipyconsole.get_current_client()
    assert client.get_name() == 'c.py - b/A'
@flaky(max_runs=3)
def test_console_coloring(ipyconsole, qtbot):
    """Test that console colors match the selected editor color scheme."""
    config_options = ipyconsole.get_widget().config_options()
    syntax_style = config_options.JupyterWidget.syntax_style
    style_sheet = config_options.JupyterWidget.style_sheet
    console_font_color = get_console_font_color(syntax_style)
    console_background_color = get_console_background_color(style_sheet)
    # Colors Spyder uses for the editor, from the selected scheme
    selected_color_scheme = ipyconsole.get_conf(
        'selected', section='appearance')
    color_scheme = get_color_scheme(selected_color_scheme)
    editor_background_color = color_scheme['background']
    editor_font_color = color_scheme['normal'][0]
    # Normalize quoting before comparing
    console_background_color = console_background_color.replace("'", "")
    editor_background_color = editor_background_color.replace("'", "")
    console_font_color = console_font_color.replace("'", "")
    editor_font_color = editor_font_color.replace("'", "")
    assert console_background_color.strip() == editor_background_color.strip()
    assert console_font_color.strip() == editor_font_color.strip()
@flaky(max_runs=3)
def test_set_cwd(ipyconsole, qtbot, tmpdir):
    """Test that setting the console's working directory works."""
    # Wait until the window is fully up
    shell = ipyconsole.get_current_shellwidget()
    # Directory name includes an apostrophe on purpose, see
    # spyder-ide/spyder#6451.
    savetemp = shell._cwd
    tempdir = to_text_string(tmpdir.mkdir("queen's"))
    shell.set_cwd(tempdir)
    # The kernel's os.getcwd() must reflect the change
    with qtbot.waitSignal(shell.executed):
        shell.execute("import os; cwd = os.getcwd()")
    assert shell.get_value('cwd') == tempdir
    # Restore the original working directory
    shell.set_cwd(savetemp)
@flaky(max_runs=3)
def test_get_cwd(ipyconsole, qtbot, tmpdir):
    """Test that the console's cwd is synced after a kernel-side chdir."""
    shell = ipyconsole.get_current_shellwidget()
    # BUG FIX: the original saved the cwd as `avetemp` but restored
    # `savetemp` at the end, raising a NameError on every run.
    savetemp = shell._cwd
    tempdir = to_text_string(tmpdir.mkdir("queen's"))
    assert shell._cwd != tempdir
    # Need to escape \ on Windows.
    if os.name == 'nt':
        tempdir = tempdir.replace(u"\\", u"\\\\")
    # Change directory in the console.
    with qtbot.waitSignal(shell.executed):
        shell.execute(u"import os; os.chdir(u'''{}''')".format(tempdir))
    # Ask for directory.
    with qtbot.waitSignal(shell.sig_working_directory_changed):
        shell.update_cwd()
    # Undo the Windows escaping before comparing
    if os.name == 'nt':
        tempdir = tempdir.replace(u"\\\\", u"\\")
    assert shell._cwd == tempdir
    # Restore the original working directory
    shell.set_cwd(savetemp)
@flaky(max_runs=3)
def test_request_env(ipyconsole, qtbot):
    """Test that requesting the kernel environment works."""
    shell = ipyconsole.get_current_shellwidget()
    # Add a new entry to os.environ
    with qtbot.waitSignal(shell.executed):
        shell.execute("import os; os.environ['FOO'] = 'bar'" )
    # Ask for os.environ contents
    with qtbot.waitSignal(shell.sig_show_env) as blocker:
        shell.request_env()
    # Get env contents from the signal
    env_contents = blocker.args[0]
    # Assert that our added entry is part of os.environ
    assert env_contents['FOO'] == 'bar'
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt',
                    reason="Fails due to differences in path handling")
def test_request_syspath(ipyconsole, qtbot, tmpdir):
    """Test that requesting the kernel sys.path works."""
    shell = ipyconsole.get_current_shellwidget()
    # Add a new entry to sys.path
    with qtbot.waitSignal(shell.executed):
        tmp_dir = to_text_string(tmpdir)
        shell.execute("import sys; sys.path.append('%s')" % tmp_dir)
    # Ask for sys.path contents
    with qtbot.waitSignal(shell.sig_show_syspath) as blocker:
        shell.request_syspath()
    # Get sys.path contents from the signal
    syspath_contents = blocker.args[0]
    # Assert that our added entry is part of sys.path
    assert tmp_dir in syspath_contents
@flaky(max_runs=10)
@pytest.mark.skipif(os.name == 'nt', reason="It doesn't work on Windows")
def test_save_history_dbg(ipyconsole, qtbot):
    """Test that pdb history is saved and browsable across clients."""
    shell = ipyconsole.get_current_shellwidget()
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Enter an expression
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, 'aa = 10')
        qtbot.keyClick(control, Qt.Key_Enter)
    # Add a pdb command to make sure it is not saved
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!u')
        qtbot.keyClick(control, Qt.Key_Enter)
    # Add an empty line to make sure it is not saved
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Clear console (for some reason using shell.clear_console
    # doesn't work here)
    shell.reset(clear=True)
    qtbot.waitUntil(lambda: shell.is_waiting_pdb_input())
    assert shell.is_waiting_pdb_input()
    # Up arrow must recall the expression, not the command or empty line
    qtbot.keyClick(control, Qt.Key_Up)
    assert 'aa = 10' in control.toPlainText()
    # History must also be available in a brand-new client
    ipyconsole.create_new_client()
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Press Up arrow button and assert we get the last
    # introduced command
    qtbot.keyClick(control, Qt.Key_Up)
    assert 'aa = 10' in control.toPlainText()
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Add a multiline statment and ckeck we can browse it correctly
    shell._pdb_history.append('if True:\n    print(1)')
    shell._pdb_history.append('print(2)')
    shell._pdb_history.append('if True:\n    print(10)')
    shell._pdb_history_index = len(shell._pdb_history)
    # The continuation prompt is here
    qtbot.keyClick(control, Qt.Key_Up)
    assert '...:     print(10)' in control.toPlainText()
    # Move the cursor into the multiline entry and browse from there
    shell._control.set_cursor_position(shell._control.get_position('eof') - 25)
    qtbot.keyClick(control, Qt.Key_Up)
    assert '...:     print(1)' in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 or IPython.version_info < (7, 17),
                    reason="insert is not the same in py2")
def test_dbg_input(ipyconsole, qtbot):
    """Test that input() works while debugging."""
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Debug a statement that calls input()
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print('Hello', input('name'))")
    shell.pdb_execute('!n')
    qtbot.wait(100)
    # Wait until the input() prompt is shown
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'name')
    # pdb commands must be ignored while waiting for input,
    # as this is not a pdb prompt
    shell.pdb_execute('!n')
    shell.pdb_execute('aa = 10')
    qtbot.wait(500)
    assert control.toPlainText().split()[-1] == 'name'
    # Answering the input request resumes execution
    shell.kernel_client.input('test')
    qtbot.waitUntil(lambda: 'Hello test' in control.toPlainText())
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It doesn't work on PY2")
def test_unicode_vars(ipyconsole, qtbot):
    """Test getting and setting variables with unicode names."""
    shell = ipyconsole.get_current_shellwidget()
    # Define a variable with a non-ASCII identifier in the kernel
    with qtbot.waitSignal(shell.executed):
        shell.execute('д = 10')
    assert shell.get_value('д') == 10
    # Setting it from the client side must also work
    shell.set_value('д', 20)
    qtbot.waitUntil(lambda: shell.get_value('д') == 20)
    assert shell.get_value('д') == 20
@flaky(max_runs=3)
def test_read_stderr(ipyconsole, qtbot):
    """Test that the client reads its stderr file with the right encoding."""
    client = ipyconsole.get_current_client()
    # Write some text in non-UTF-8 encoding (cp437) and read it back
    content = 'Test text'
    stderr_file = client.stderr_obj.filename
    codecs.open(stderr_file, 'w', 'cp437').write(content)
    assert content == client.stderr_obj.get_contents()
@flaky(max_runs=10)
@pytest.mark.no_xvfb
@pytest.mark.skipif(running_in_ci() and os.name == 'nt',
                    reason="Times out on Windows")
def test_values_dbg(ipyconsole, qtbot):
    """Test getting, setting, copying and removing values while debugging."""
    shell = ipyconsole.get_current_shellwidget()
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Get value
    with qtbot.waitSignal(shell.executed):
        shell.execute('aa = 10')
    assert 'aa = 10' in control.toPlainText()
    assert shell.get_value('aa') == 10
    # Set value
    shell.set_value('aa', 20)
    qtbot.waitUntil(lambda: shell.get_value('aa') == 20)
    assert shell.get_value('aa') == 20
    # Copy value
    shell.copy_value('aa', 'bb')
    qtbot.waitUntil(lambda: shell.get_value('bb') == 20)
    assert shell.get_value('bb') == 20
    # Remove value
    shell.remove_value('aa')

    def is_defined(val):
        # get_value raises KeyError for undefined kernel variables
        try:
            shell.get_value(val)
            return True
        except KeyError:
            return False
    qtbot.waitUntil(lambda: not is_defined('aa'))
    with qtbot.waitSignal(shell.executed):
        shell.execute('aa')
    # Wait until the message is recieved
    assert "*** NameError: name 'aa' is not defined" in control.toPlainText()
@flaky(max_runs=3)
def test_execute_events_dbg(ipyconsole, qtbot):
    """Check that plots only appear while debugging when the
    'pdb_execute_events' option is enabled."""
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    with qtbot.waitSignal(shell.executed):
        shell.execute('import matplotlib.pyplot as plt')
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # With event processing enabled, plotting inside pdb shows the figure.
    ipyconsole.set_conf('pdb_execute_events', True)
    shell.set_pdb_execute_events(True)
    qtbot.keyClicks(control, 'plt.plot(range(10))')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    assert shell._control.toHtml().count('img src') == 1
    # Set processing events to False
    ipyconsole.set_conf('pdb_execute_events', False)
    shell.set_pdb_execute_events(False)
    # Test reset magic
    qtbot.keyClicks(control, 'plt.plot(range(10))')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Assert that there's no new plots in the console
    assert shell._control.toHtml().count('img src') == 1
    # An explicit plt.show() still displays the figure.
    qtbot.keyClicks(control, 'plt.show()')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    assert shell._control.toHtml().count('img src') == 2
@flaky(max_runs=3)
def test_run_doctest(ipyconsole, qtbot):
    """Check that doctests can be run and report results in the console."""
    shell = ipyconsole.get_current_shellwidget()
    snippet = dedent('''
    def add(x, y):
        """
        >>> add(1, 2)
        3
        >>> add(5.1, 2.2)
        7.3
        """
        return x + y
    ''')
    # Define the function, import doctest, then run the module's doctests.
    for command in (snippet, 'import doctest', 'doctest.testmod()'):
        with qtbot.waitSignal(shell.executed):
            shell.execute(command)
    # Both doctests must have passed.
    assert "TestResults(failed=0, attempted=2)" in shell._control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or (PY2 and PYQT5),
                    reason="It times out frequently")
def test_mpl_backend_change(ipyconsole, qtbot):
    """Plots made after switching away from inline must not be inlined."""
    shell = ipyconsole.get_current_shellwidget()
    commands = (
        'import matplotlib.pyplot as plt',  # import Matplotlib
        'plt.plot(range(10))',              # inline plot
        '%matplotlib tk',                   # switch to an interactive backend
        'plt.plot(range(10))',              # must not appear inline
    )
    for command in commands:
        with qtbot.waitSignal(shell.executed):
            shell.execute(command)
    # Only the first plot ended up as an inline image in the console.
    assert shell._control.toHtml().count('img src') == 1
@flaky(max_runs=10)
@pytest.mark.skipif(running_in_ci(), reason="Fails frequently in CI")
def test_ctrl_c_dbg(ipyconsole, qtbot):
    """Check that Ctrl+C while debugging shows the copy-shortcut hint."""
    shell = ipyconsole.get_current_shellwidget()
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Test Ctrl+C
    qtbot.keyClick(control, Qt.Key_C, modifier=Qt.ControlModifier)
    # The hint is printed asynchronously, so poll for it first.
    qtbot.waitUntil(
        lambda: 'For copying text while debugging, use Ctrl+Shift+C' in
        control.toPlainText(), timeout=2000)
    assert 'For copying text while debugging, use Ctrl+Shift+C' in control.toPlainText()
@flaky(max_runs=10)
@pytest.mark.skipif(os.name == 'nt', reason="It doesn't work on Windows")
def test_clear_and_reset_magics_dbg(ipyconsole, qtbot):
    """Check that console clearing and namespace reset work while debugging."""
    shell = ipyconsole.get_current_shellwidget()
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Test clear magic
    shell.clear_console()
    # After clearing, only the next pdb prompt should remain visible.
    qtbot.waitUntil(lambda: '\nIPdb [2]: ' == control.toPlainText())
    # Test reset magic
    qtbot.keyClicks(control, 'bb = 10')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    assert shell.get_value('bb') == 10
    shell.reset_namespace()
    qtbot.wait(1000)
    # After the reset, 'bb' must be gone from the kernel's namespace.
    qtbot.keyClicks(control, 'bb')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    assert "*** NameError: name 'bb' is not defined" in control.toPlainText()
@flaky(max_runs=3)
def test_restart_kernel(ipyconsole, mocker, qtbot):
    """Check that a kernel restart clears the namespace, clears stderr
    output, and re-reports Matplotlib backend errors."""
    # Mock method we want to check
    mocker.patch.object(ClientWidget, "_show_mpl_backend_errors")
    ipyconsole.create_new_client()
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Do an assignment to verify that it's not there after restarting
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')
    # Write to stderr so we can verify it's cleared by the restart.
    with qtbot.waitSignal(shell.executed):
        shell.execute('import sys; sys.__stderr__.write("HEL"+"LO")')
    qtbot.waitUntil(
        lambda: 'HELLO' in shell._control.toPlainText(), timeout=SHELL_TIMEOUT)
    # Restart kernel and wait until it's up again
    shell._prompt_html = None
    ipyconsole.restart_kernel()
    qtbot.waitUntil(
        lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    assert 'Restarting kernel...' in shell._control.toPlainText()
    assert 'HELLO' not in shell._control.toPlainText()
    assert not shell.is_defined('a')
    # Expected twice: once at kernel startup and once after the restart.
    assert ClientWidget._show_mpl_backend_errors.call_count == 2
@flaky(max_runs=3)
def test_load_kernel_file_from_id(ipyconsole, qtbot):
    """Connect a new client to an existing kernel using only its id."""
    basename = osp.basename(
        ipyconsole.get_current_client().connection_file)
    # The id is whatever sits between the 'kernel-' prefix and '.json'.
    kernel_id = basename.split('kernel-')[-1].split('.json')[0]
    ipyconsole.get_widget()._create_client_for_kernel(
        kernel_id, None, None, None)
    qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
    # The new client is tagged as the second one attached to kernel 1.
    assert ipyconsole.get_clients()[1].id_ == dict(int_id='1', str_id='B')
@flaky(max_runs=3)
def test_load_kernel_file_from_location(ipyconsole, qtbot, tmpdir):
    """Connect a new client using a connection file copied elsewhere."""
    current_client = ipyconsole.get_current_client()
    # Copy the kernel's connection file into a temporary location.
    copied_file = to_text_string(
        tmpdir.join(osp.basename(current_client.connection_file)))
    shutil.copy2(current_client.connection_file, copied_file)
    ipyconsole.get_widget()._create_client_for_kernel(
        copied_file, None, None, None)
    qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
    assert len(ipyconsole.get_clients()) == 2
@flaky(max_runs=3)
def test_load_kernel_file(ipyconsole, qtbot, tmpdir):
    """Check that two clients attached to the same connection file share
    the kernel's namespace."""
    shell = ipyconsole.get_current_shellwidget()
    client = ipyconsole.get_current_client()
    ipyconsole.get_widget()._create_client_for_kernel(
        client.connection_file, None, None, None)
    qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
    new_client = ipyconsole.get_clients()[1]
    new_shell = new_client.shellwidget
    qtbot.waitUntil(lambda: new_shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(new_shell.executed):
        new_shell.execute('a = 10')
    # The second client attached to kernel 1 gets the 'B' suffix.
    assert new_client.id_ == dict(int_id='1', str_id='B')
    # Both shells see the same variable because they share the kernel.
    assert shell.get_value('a') == new_shell.get_value('a')
@flaky(max_runs=3)
def test_sys_argv_clear(ipyconsole, qtbot):
    """The kernel must start with a clean sys.argv."""
    sw = ipyconsole.get_current_shellwidget()
    with qtbot.waitSignal(sw.executed):
        sw.execute('import sys; A = sys.argv')
    # A freshly started kernel only carries the empty program name.
    assert sw.get_value("A") == ['']
@flaky(max_runs=5)
@pytest.mark.skipif(os.name == 'nt', reason="Fails sometimes on Windows")
def test_set_elapsed_time(ipyconsole, qtbot):
    """Check that the client's elapsed-time label updates with the timer."""
    client = ipyconsole.get_current_client()
    ipyconsole.get_widget().set_show_elapsed_time_current_client(True)
    # Pretend the client started two minutes ago.
    client.t0 -= 120
    with qtbot.waitSignal(client.timer.timeout, timeout=5000):
        ipyconsole.get_widget().set_client_elapsed_time(client)
    # Allow one second of slack for timer jitter.
    assert ('00:02:00' in client.time_label.text() or
            '00:02:01' in client.time_label.text())
    # Wait for one more timer tick; the label must advance by one second.
    with qtbot.waitSignal(client.timer.timeout, timeout=5000):
        pass
    assert ('00:02:01' in client.time_label.text() or
            '00:02:02' in client.time_label.text())
    # Move the start time into the future: elapsed time must clamp to zero.
    client.t0 += 2000
    with qtbot.waitSignal(client.timer.timeout, timeout=5000):
        pass
    assert '00:00:00' in client.time_label.text()
    # Stop updating the label so later tests aren't affected.
    client.timer.timeout.disconnect(client.show_time)
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on Windows")
def test_stderr_file_is_removed_one_kernel(ipyconsole, qtbot, monkeypatch):
    """Closing a kernel's only client must delete its stderr file."""
    current_client = ipyconsole.get_current_client()
    # Auto-confirm the close-console question dialog.
    monkeypatch.setattr(QMessageBox, 'question',
                        classmethod(lambda *args: QMessageBox.Yes))
    stderr_path = current_client.stderr_obj.filename
    # While the client is alive its stderr file must exist.
    assert osp.exists(stderr_path)
    ipyconsole.close_client(client=current_client)
    assert not osp.exists(stderr_path)
@flaky(max_runs=3)
@pytest.mark.skipif(
    not sys.platform.startswith('linux'),
    reason="Doesn't work on Windows and hangs sometimes on Mac")
def test_stderr_file_is_removed_two_kernels(ipyconsole, qtbot, monkeypatch):
    """Check that the shared stderr file is removed when the user chooses
    to close all clients attached to the same kernel."""
    client = ipyconsole.get_current_client()
    # Attach a second client to the same kernel.
    ipyconsole.get_widget()._create_client_for_kernel(
        client.connection_file, None, None, None)
    assert len(ipyconsole.get_widget().get_related_clients(client)) == 1
    other_client = ipyconsole.get_widget().get_related_clients(client)[0]
    # Both clients share the same stderr file.
    assert client.stderr_obj.filename == other_client.stderr_obj.filename
    # Answering Yes means "close all clients of this kernel".
    monkeypatch.setattr(QMessageBox, 'question',
                        classmethod(lambda *args: QMessageBox.Yes))
    assert osp.exists(client.stderr_obj.filename)
    ipyconsole.close_client(client=client)
    assert not osp.exists(client.stderr_obj.filename)
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on Windows")
def test_stderr_file_remains_two_kernels(ipyconsole, qtbot, monkeypatch):
    """Check that the shared stderr file is kept when the user declines
    to close the other clients attached to the same kernel."""
    client = ipyconsole.get_current_client()
    # New client with the same kernel
    ipyconsole.get_widget()._create_client_for_kernel(
        client.connection_file, None, None, None)
    assert len(ipyconsole.get_widget().get_related_clients(client)) == 1
    other_client = ipyconsole.get_widget().get_related_clients(client)[0]
    # Both clients share the same stderr file.
    assert client.stderr_obj.filename == other_client.stderr_obj.filename
    # In a normal situation file should exist
    # Answering No keeps the other client (and its stderr file) alive.
    monkeypatch.setattr(QMessageBox, "question",
                        classmethod(lambda *args: QMessageBox.No))
    assert osp.exists(client.stderr_obj.filename)
    ipyconsole.close_client(client=client)
    assert osp.exists(client.stderr_obj.filename)
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
                    reason="Fails sometimes on macOS")
def test_kernel_crash(ipyconsole, qtbot):
    """Check that a kernel that fails to start shows an error page with
    the underlying traceback text."""
    # Create an IPython kernel config file with a bad config
    ipy_kernel_cfg = osp.join(get_ipython_dir(), 'profile_default',
                              'ipython_kernel_config.py')
    with open(ipy_kernel_cfg, 'w') as f:
        # This option must be a string, not an int
        f.write("c.InteractiveShellApp.extra_extension = 1")
    ipyconsole.create_new_client()
    # Assert that the console is showing an error
    qtbot.waitUntil(lambda: ipyconsole.get_clients()[-1].is_error_shown,
                    timeout=6000)
    error_client = ipyconsole.get_clients()[-1]
    assert error_client.is_error_shown
    # Assert the error contains the text we expect
    webview = error_client.infowidget
    # QtWebEngine and QtWebKit expose the page differently.
    if WEBENGINE:
        webpage = webview.page()
    else:
        webpage = webview.page().mainFrame()
    qtbot.waitUntil(
        lambda: check_text(webpage, "Bad config encountered"),
        timeout=6000)
    # Remove bad kernel config file
    os.remove(ipy_kernel_cfg)
@flaky(max_runs=3)
@pytest.mark.skipif(not os.name == 'nt', reason="Only necessary on Windows")
def test_remove_old_std_files(ipyconsole, qtbot):
    """Check that stale std files are removed while files belonging to
    running kernels are preserved."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Create empty std files in our temp dir to see if they are removed
    # correctly.
    tmpdir = get_temp_dir()
    open(osp.join(tmpdir, 'foo.stderr'), 'a').close()
    open(osp.join(tmpdir, 'foo.stdout'), 'a').close()
    # Assert that only old std files are removed
    ipyconsole._remove_old_std_files()
    assert not osp.isfile(osp.join(tmpdir, 'foo.stderr'))
    assert not osp.isfile(osp.join(tmpdir, 'foo.stdout'))
    # The current kernel std files should be present
    for fname in glob.glob(osp.join(tmpdir, '*')):
        assert osp.basename(fname).startswith('kernel')
        assert any(
            [osp.basename(fname).endswith(ext)
             for ext in ('.stderr', '.stdout', '.fault')]
        )
@flaky(max_runs=10)
@pytest.mark.use_startup_wdir
@pytest.mark.skipif(os.name == 'nt', reason="Too flaky on Windows")
def test_console_working_directory(ipyconsole, qtbot):
    """The kernel must start in the configured startup working directory."""
    sw = ipyconsole.get_current_shellwidget()
    with qtbot.waitSignal(sw.executed):
        sw.execute('import os; cwd = os.getcwd()')
    # The last path component must be the directory set up by the fixture.
    assert osp.split(sw.get_value('cwd'))[-1] == NEW_DIR
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux') or PY2,
                    reason="It only works on Linux with python 3.")
def test_console_complete(ipyconsole, qtbot, tmpdir):
    """Test tab completion in the console, both at the normal prompt and
    inside the debugger (variables, attributes, pdb commands, breakpoints).
    """
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()

    def check_value(name, value):
        # get_value raises KeyError until the kernel has the variable.
        try:
            return shell.get_value(name) == value
        except KeyError:
            return False

    # A unique prefix completes directly on Tab.
    with qtbot.waitSignal(shell.executed):
        shell.execute('cbs = 1')
    qtbot.waitUntil(lambda: check_value('cbs', 1))
    qtbot.wait(500)
    qtbot.keyClicks(control, 'cb')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'cbs',
                    timeout=6000)
    # With two candidates the completion widget must pop up instead.
    with qtbot.waitSignal(shell.executed):
        shell.execute('cbba = 1')
    qtbot.waitUntil(lambda: check_value('cbba', 1))
    qtbot.keyClicks(control, 'cb')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(shell._completion_widget.isVisible)
    assert control.toPlainText().split()[-1] == 'cb'
    qtbot.keyClick(shell._completion_widget, Qt.Key_Enter)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'cbba')
    # Completion must also work at the pdb prompt.
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    qtbot.keyClicks(control, 'ab')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abs')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Completion inside an open call expression.
    qtbot.keyClicks(control, 'print(ab')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(
        lambda: control.toPlainText().split()[-1] == 'print(abs')
    qtbot.keyClicks(control, ')')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Enter an expression
    qtbot.keyClicks(control, 'baab = 10')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(100)
    qtbot.waitUntil(lambda: check_value('baab', 10))
    # Check baab is completed
    qtbot.keyClicks(control, 'baa')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'baab')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Check the completion widget is shown for abba, abs
    qtbot.keyClicks(control, 'abba = 10')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(100)
    qtbot.waitUntil(lambda: check_value('abba', 10))
    qtbot.keyClicks(control, 'ab')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(shell._completion_widget.isVisible)
    assert control.toPlainText().split()[-1] == 'ab'
    qtbot.keyClick(shell._completion_widget, Qt.Key_Enter)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abba')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Create a class
    qtbot.keyClicks(control, 'class A(): baba = 1')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(100)
    qtbot.waitUntil(lambda: shell.is_defined('A'))
    qtbot.keyClicks(control, 'a = A()')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(100)
    qtbot.waitUntil(lambda: shell.is_defined('a'))
    # Check we can complete attributes
    qtbot.keyClicks(control, 'a.ba')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'a.baba')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Check we can complete pdb command names
    qtbot.keyClicks(control, '!longl')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == '!longlist')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Check we can use custom complete for pdb
    test_file = tmpdir.join('test.py')
    test_file.write('stuff\n')
    # Set a breakpoint in the new file
    qtbot.keyClicks(control, '!b ' + str(test_file) + ':1')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Check we can complete the breakpoint number
    qtbot.keyClicks(control, '!ignore ')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == '1')
@flaky(max_runs=10)
@pytest.mark.use_startup_wdir
def test_pdb_multiline(ipyconsole, qtbot):
    """Check that multiline statements can be entered at the pdb prompt."""
    shell = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive clicks
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    assert '\nIPdb [' in control.toPlainText()
    # Type an indented block line by line; an empty line finishes it.
    qtbot.keyClicks(control, 'if True:')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    qtbot.keyClicks(control, 'bb = 10')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    assert shell.get_value('bb') == 10
    # Continuation lines are echoed with the '...:' prompt.
    assert "if True:\n   ...:     bb = 10\n" in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "show_lib", [True, False])
def test_pdb_ignore_lib(ipyconsole, qtbot, show_lib):
    """Check that the 'pdb_ignore_lib' option controls whether stepping
    enters library files."""
    shell = ipyconsole.get_current_shellwidget()
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Tests assume inline backend
    ipyconsole.set_conf('pdb_ignore_lib', not show_lib)
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Step once, then quit the debugger.
    qtbot.keyClicks(control, '!s')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    qtbot.keyClicks(control, '!q')
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(control, Qt.Key_Enter)
    # Stepping into print() lands in ipykernel's iostream.py only when
    # library files are not ignored.
    if show_lib:
        assert 'iostream.py' in control.toPlainText()
    else:
        assert 'iostream.py' not in control.toPlainText()
    # Restore the default so later tests are unaffected.
    ipyconsole.set_conf('pdb_ignore_lib', True)
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="Times out on macOS")
def test_calltip(ipyconsole, qtbot):
    """Typing an open paren after a method must pop up its calltip."""
    sw = ipyconsole.get_current_shellwidget()
    # Give focus to the widget that's going to receive the keystrokes.
    focus_widget = ipyconsole.get_widget().get_focus_widget()
    focus_widget.setFocus()
    with qtbot.waitSignal(sw.executed):
        sw.execute('a = {"a": 1}')
    # Type up to the opening parenthesis, then give the calltip time to show.
    qtbot.keyClicks(focus_widget, 'a.keys(', delay=100)
    qtbot.wait(1000)
    assert focus_widget.calltip_widget.isVisible()
@flaky(max_runs=3)
@pytest.mark.order(1)
@pytest.mark.test_environment_interpreter
def test_conda_env_activation(ipyconsole, qtbot):
    """Check that the kernel reports the test conda env as activated."""
    sw = ipyconsole.get_current_shellwidget()
    with qtbot.waitSignal(sw.executed):
        sw.execute(
            "import os; conda_prefix = os.environ.get('CONDA_PREFIX')")
    # Normalize path separators so the comparison also works on Windows.
    expected = get_conda_test_env().replace('\\', '/')
    if is_conda_env(expected):
        reported = sw.get_value('conda_prefix').replace('\\', '/')
        assert reported == expected
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="no SIGTERM on Windows")
def test_kernel_kill(ipyconsole, qtbot):
    """Check that a killed kernel is auto-restarted and a fresh comm
    channel is established with it."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.wait(3000)
    # Make the kernel kill itself with SIGTERM.
    crash_string = 'import os, signal; os.kill(os.getpid(), signal.SIGTERM)'
    # Record the comm open before the crash to compare afterwards.
    old_open_comms = list(shell.spyder_kernel_comm._comms.keys())
    assert len(old_open_comms) == 1
    with qtbot.waitSignal(shell.sig_prompt_ready, timeout=30000):
        shell.execute(crash_string)
    assert crash_string in shell._control.toPlainText()
    assert "Restarting kernel..." in shell._control.toPlainText()
    # The restarted kernel must use a brand-new comm, not the old one.
    new_open_comms = list(shell.spyder_kernel_comm._comms.keys())
    assert len(new_open_comms) == 1
    assert old_open_comms[0] != new_open_comms[0]
    qtbot.waitUntil(
        lambda: shell.spyder_kernel_comm._comms[new_open_comms[0]][
            'status'] == 'ready')
    assert shell.spyder_kernel_comm._comms[new_open_comms[0]][
        'status'] == 'ready'
@flaky(max_runs=3)
@pytest.mark.parametrize("spyder_pythonpath", [True, False])
def test_wrong_std_module(ipyconsole, qtbot, tmpdir, spyder_pythonpath):
    """Check that a module shadowing a stdlib one (random.py) on the cwd
    or on spyder_pythonpath doesn't break kernel startup."""
    # Create an empty random.py that shadows the stdlib module, either in
    # a dir added to spyder_pythonpath or in the current working directory.
    if spyder_pythonpath:
        wrong_random_mod = tmpdir.join('random.py')
        wrong_random_mod.write('')
        wrong_random_mod = str(wrong_random_mod)
        ipyconsole.set_conf('spyder_pythonpath', [str(tmpdir)], section='main')
    else:
        wrong_random_mod = osp.join(os.getcwd(), 'random.py')
        with open(wrong_random_mod, 'w') as f:
            f.write('')
    # The new kernel must still start and show a prompt.
    ipyconsole.create_new_client()
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Assert the extra path from spyder_pythonpath was added
    if spyder_pythonpath:
        check_sys_path = (
            "import sys; path_added = r'{}' in sys.path".format(str(tmpdir))
        )
        with qtbot.waitSignal(shell.sig_prompt_ready, timeout=30000):
            shell.execute(check_sys_path)
        assert shell.get_value('path_added')
    # Remove wrong module
    os.remove(wrong_random_mod)
    # Restore CONF
    ipyconsole.set_conf('spyder_pythonpath', [], section='main')
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="no SIGTERM on Windows")
def test_kernel_restart_after_manual_restart_and_crash(ipyconsole, qtbot):
    """Check that a kernel crash after a manual restart still triggers an
    automatic restart with a working comm channel."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Restart kernel and wait until it's up again
    shell._prompt_html = None
    ipyconsole.restart_kernel()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    qtbot.wait(3000)
    # Make the restarted kernel kill itself with SIGTERM.
    crash_string = 'import os, signal; os.kill(os.getpid(), signal.SIGTERM)'
    with qtbot.waitSignal(shell.sig_prompt_ready, timeout=30000):
        shell.execute(crash_string)
    assert crash_string in shell._control.toPlainText()
    # The auto-restarted kernel must be able to run code.
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')
    assert shell.is_defined('a')
    # And its comm channel must come up as ready.
    open_comms = list(shell.spyder_kernel_comm._comms.keys())
    qtbot.waitUntil(
        lambda: shell.spyder_kernel_comm._comms[open_comms[0]][
            'status'] == 'ready')
@flaky(max_runs=3)
def test_stderr_poll(ipyconsole, qtbot):
    """Check that text appended to the kernel's stderr file is polled and
    printed in the console, including on subsequent appends."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    client = ipyconsole.get_current_client()
    client.stderr_obj.handle.flush()
    # Append directly to the file, as the kernel process would.
    with open(client.stderr_obj.filename, 'a') as f:
        f.write("test_test")
    qtbot.waitUntil(lambda: "test_test" in ipyconsole.get_widget(
        ).get_focus_widget().toPlainText())
    assert "test_test" in ipyconsole.get_widget(
        ).get_focus_widget().toPlainText()
    # A second append must also be picked up (text now appears twice).
    client.stderr_obj.handle.flush()
    with open(client.stderr_obj.filename, 'a') as f:
        f.write("\ntest_test")
    qtbot.waitUntil(lambda: ipyconsole.get_widget().get_focus_widget(
        ).toPlainText().count("test_test") == 2)
    assert ipyconsole.get_widget().get_focus_widget().toPlainText(
        ).count("test_test") == 2
@flaky(max_runs=3)
def test_stdout_poll(ipyconsole, qtbot):
    """Text appended to the kernel's stdout file must reach the console."""
    sw = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: sw._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    current_client = ipyconsole.get_current_client()
    current_client.stdout_obj.handle.flush()
    # Append directly to the file, as the kernel process would.
    with open(current_client.stdout_obj.filename, 'a') as handle:
        handle.write("test_test")

    def console_text():
        return ipyconsole.get_widget().get_focus_widget().toPlainText()

    # The poller must pick the text up and print it in the console.
    qtbot.waitUntil(lambda: "test_test" in console_text(), timeout=5000)
    assert "test_test" in console_text()
@flaky(max_runs=10)
@pytest.mark.use_startup_wdir
def test_startup_code_pdb(ipyconsole, qtbot):
    """Check that 'startup/pdb_run_lines' is executed on entering pdb."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Run a line on startup
    ipyconsole.set_conf(
        'startup/pdb_run_lines',
        'abba = 12; print("Hello")'
    )
    shell.execute('%debug print()')
    qtbot.waitUntil(lambda: 'Hello' in control.toPlainText())
    # Verify that the line was executed
    assert shell.get_value('abba') == 12
    # Reset setting
    ipyconsole.set_conf('startup/pdb_run_lines', '')
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "backend",
    ['inline', 'qt5', 'tk', 'osx']
)
def test_pdb_eventloop(ipyconsole, qtbot, backend):
    """Check that code still runs at the pdb prompt while a Matplotlib
    event loop is active for the given backend.

    Known-bad platform/backend combinations are skipped with pytest.skip
    (a bare ``return`` would report them as false passes).
    """
    if backend == 'tk' and os.name == 'nt':
        pytest.skip("Fails with the Tk backend on Windows")
    if backend == 'osx' and sys.platform != "darwin":
        pytest.skip("The macOS backend is only available on macOS")
    if backend == 'qt5' and not os.name == "nt" and running_in_ci():
        pytest.skip("Fails with the Qt5 backend on Linux/macOS CI")
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    # Activate the backend's event loop, enter the debugger, and make
    # sure the pdb prompt can still execute code.
    with qtbot.waitSignal(shell.executed):
        shell.execute("%matplotlib " + backend)
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print()")
    with qtbot.waitSignal(shell.executed):
        shell.execute("print('Two: ' + str(1+1))")
    assert "Two: 2" in control.toPlainText()
@flaky(max_runs=3)
def test_recursive_pdb(ipyconsole, qtbot):
    """Check entering/leaving nested debuggers, the nested prompt text,
    and that completion keeps working at every nesting level."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print()")
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("abab = 10")
    # Check that we can't use magic twice
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("%debug print()")
    assert "Please don't use '%debug'" in control.toPlainText()
    # Check we can enter the recursive debugger twice
    # Each nesting level wraps the prompt in one more pair of parentheses.
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!debug print()")
    assert "(IPdb [1]):" in control.toPlainText()
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!debug print()")
    assert "((IPdb [1])):" in control.toPlainText()
    # quit one layer
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!quit")
    assert control.toPlainText().split()[-2:] == ["(IPdb", "[2]):"]
    # Check completion works
    qtbot.keyClicks(control, 'aba')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abab',
                    timeout=SHELL_TIMEOUT)
    # quit one layer
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!quit")
    assert control.toPlainText().split()[-2:] == ["IPdb", "[4]:"]
    # Check completion works
    qtbot.keyClicks(control, 'aba')
    qtbot.keyClick(control, Qt.Key_Tab)
    qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abab',
                    timeout=SHELL_TIMEOUT)
    # Quitting the outermost debugger returns to the regular prompt.
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!quit")
    with qtbot.waitSignal(shell.executed):
        shell.execute("1 + 1")
    assert control.toPlainText().split()[-2:] == ["In", "[3]:"]
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on windows")
def test_stop_pdb(ipyconsole, qtbot):
    """Check the stop button: first click interrupts code running at the
    pdb prompt, second click exits the debugger."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    stop_button = ipyconsole.get_widget().stop_button
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print()")
    # Start long-running code at the pdb prompt (no waitSignal: it blocks).
    shell.execute("import time; time.sleep(10)")
    qtbot.wait(500)
    # First click interrupts the sleep but stays inside the debugger.
    with qtbot.waitSignal(shell.executed, timeout=1000):
        qtbot.mouseClick(stop_button, Qt.LeftButton)
    assert "KeyboardInterrupt" in control.toPlainText()
    assert "IPdb [2]:" in control.toPlainText()
    assert "In [2]:" not in control.toPlainText()
    # Second click exits the debugger back to the regular prompt.
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(stop_button, Qt.LeftButton)
    assert "In [2]:" in control.toPlainText()
@flaky(max_runs=3)
# NOTE: this previously checked `sys.platform == 'nt'`, which is never true
# (sys.platform is 'win32' on Windows), so the skip never fired. Use
# os.name, consistent with the other skipifs in this file.
@pytest.mark.skipif(os.name == 'nt', reason="Times out on Windows")
def test_code_cache(ipyconsole, qtbot):
    """Check that queued execute requests run in order, and that an
    interrupt cancels pending (cached) requests — both at the regular
    prompt and at the pdb prompt."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()

    def check_value(name, value):
        # get_value raises KeyError until the kernel has the variable.
        try:
            return shell.get_value(name) == value
        except KeyError:
            return False

    # Send two execute requests and make sure the second one is executed
    shell.execute('import time; time.sleep(.5)')
    shell.execute('var = 142')
    qtbot.wait(500)
    qtbot.waitUntil(lambda: check_value('var', 142))
    assert shell.get_value('var') == 142
    # Send two execute requests and cancel the second one
    shell.execute('import time; time.sleep(.5)')
    shell.execute('var = 1000')
    shell.interrupt_kernel()
    qtbot.wait(1000)
    # Make sure the value of var didn't change
    assert shell.get_value('var') == 142
    # Repeat the same two scenarios at the pdb prompt.
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    assert 'IPdb [' in shell._control.toPlainText()
    shell.execute('time.sleep(.5)')
    shell.execute('var = 318')
    qtbot.wait(500)
    qtbot.waitUntil(lambda: check_value('var', 318))
    assert shell.get_value('var') == 318
    shell.execute('import time; time.sleep(.5)')
    shell.execute('var = 1000')
    shell.interrupt_kernel()
    qtbot.wait(1000)
    # The interrupted assignment must not have run.
    assert shell.get_value('var') == 318
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="Doesn't work on Python 2.7")
def test_pdb_code_and_cmd_separation(ipyconsole, qtbot):
    """Check that at the pdb prompt, bare input is evaluated as Python
    code while the '!' prefix forces pdb command interpretation."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    with qtbot.waitSignal(shell.executed):
        shell.execute("%debug print()")
    assert "Error" not in control.toPlainText()
    # 'e' is not a pdb command, so it's evaluated as (undefined) Python.
    with qtbot.waitSignal(shell.executed):
        shell.execute("e")
    assert "name 'e' is not defined" in control.toPlainText()
    # '!n' explicitly runs the pdb 'next' command.
    with qtbot.waitSignal(shell.executed):
        shell.execute("!n")
    assert "--Return--" in control.toPlainText()
    # Bare 'a' hits the pdb 'args' command, not the undefined name.
    with qtbot.waitSignal(shell.executed):
        shell.execute("a")
    assert ("*** NameError: name 'a' is not defined"
            not in control.toPlainText())
    # Longer unknown input is evaluated as Python...
    with qtbot.waitSignal(shell.executed):
        shell.execute("abba")
    assert "name 'abba' is not defined" in control.toPlainText()
    # ...while the '!' prefix forces it to be treated as a pdb command.
    with qtbot.waitSignal(shell.executed):
        shell.execute("!abba")
    assert "Unknown command 'abba'" in control.toPlainText()
@flaky(max_runs=3)
def test_breakpoint_builtin(ipyconsole, qtbot, tmpdir):
    """Check that the breakpoint() builtin drops into Spyder's debugger."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    # Script that prints and then calls the builtin breakpoint().
    code = dedent("""
    print('foo')
    breakpoint()
    """)
    file = tmpdir.join('test_breakpoint.py')
    file.write(code)
    with qtbot.waitSignal(shell.executed):
        shell.execute(f"runfile(filename=r'{str(file)}')")
    qtbot.wait(5000)
    assert 'foo' in control.toPlainText()
    # breakpoint() must have landed us at the pdb prompt.
    assert 'IPdb [1]:' in control.toPlainText()
def test_pdb_out(ipyconsole, qtbot):
    """Check Out-prompt behavior at the pdb prompt: expression values are
    echoed unless the statement ends with a semicolon."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    control.setFocus()
    # Enter debugging mode
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug print()')
    # Generate some output
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('a = 12 + 1; a')
    assert "[1]: 13" in control.toPlainText()
    # Generate hide output
    # A trailing semicolon suppresses the Out echo.
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('a = 14 + 1; a;')
    assert "[2]: 15" not in control.toPlainText()
    # Multiline
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('a = 16 + 1\na')
    assert "[3]: 17" in control.toPlainText()
    # Same semicolon suppression applies to multiline statements.
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('a = 18 + 1\na;')
    assert "[4]: 19" not in control.toPlainText()
    assert "IPdb [4]:" in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.auto_backend
@pytest.mark.skipif(
    running_in_ci() and not os.name == 'nt',
    reason="Times out on Linux and macOS")
def test_shutdown_kernel(ipyconsole, qtbot):
    """Check that closing a client actually kills its kernel process."""
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Create a Matplotlib plot
    with qtbot.waitSignal(shell.executed):
        shell.execute("import matplotlib.pyplot as plt; plt.plot(range(10))")
    # Get kernel pid
    with qtbot.waitSignal(shell.executed):
        shell.execute("import os; pid = os.getpid()")
    kernel_pid = shell.get_value('pid')
    # Close current tab
    ipyconsole.get_widget().close_client()
    # Wait until new client is created and previous kernel is shutdown
    qtbot.wait(5000)
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Detect if previous kernel was killed
    # Ask the new kernel whether the old pid still exists.
    with qtbot.waitSignal(shell.executed):
        shell.execute(
            f"import psutil; kernel_exists = psutil.pid_exists({kernel_pid})"
        )
    assert not shell.get_value('kernel_exists')
def test_pdb_comprehension_namespace(ipyconsole, qtbot, tmpdir):
    """Check comprehensions evaluate in the debugged frame's namespace.

    The debugged file deliberately shadows the builtin name 'locals' to
    make sure the debugger resolves names from the user frame.
    """
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()
    # Code to run
    code = "locals = 1\nx = [locals + i for i in range(2)]"
    # Write code to file on disk
    file = tmpdir.join('test_breakpoint.py')
    file.write(code)
    # Run file
    with qtbot.waitSignal(shell.executed):
        shell.execute(f"debugfile(filename=r'{str(file)}')")
    # steps 4 times
    for i in range(4):
        with qtbot.waitSignal(shell.executed):
            shell.pdb_execute("s")
        assert "Error" not in control.toPlainText()
    # Evaluate an expression mixing the shadowing local and the loop variable.
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("print('test', locals + i + 10)")
    assert "Error" not in control.toPlainText()
    assert "test 11" in control.toPlainText()
    # Make sure the debugger's internal helpers do not leak into the
    # namespace view shown to the user.
    settings = {
        'check_all': False,
        'exclude_callables_and_modules': True,
        'exclude_capitalized': False,
        'exclude_private': True,
        'exclude_unsupported': False,
        'exclude_uppercase': True,
        'excluded_names': [],
        'minmax': False,
        'show_callable_attributes': True,
        'show_special_attributes': False}
    shell.call_kernel(
        interrupt=True
    ).set_namespace_view_settings(settings)
    namespace = shell.call_kernel(blocking=True).get_namespace_view()
    for key in namespace:
        assert "_spyderpdb" not in key
# Allow running this test module directly (outside the test runner).
if __name__ == "__main__":
    pytest.main()
| true | true |
f726a2f4cad617630bb938793f65f75b2ac968fa | 5,832 | py | Python | src/ruvsarpur/ruv_client.py | HaukurPall/ruvsarpur | bf9befe37aa8c38e7b056372e11bb0f6450497a2 | [
"MIT"
] | null | null | null | src/ruvsarpur/ruv_client.py | HaukurPall/ruvsarpur | bf9befe37aa8c38e7b056372e11bb0f6450497a2 | [
"MIT"
] | null | null | null | src/ruvsarpur/ruv_client.py | HaukurPall/ruvsarpur | bf9befe37aa8c38e7b056372e11bb0f6450497a2 | [
"MIT"
] | null | null | null | import asyncio
import json
import logging
from pathlib import Path
from typing import Dict, List, TypedDict
from gql import Client, gql
from gql.client import AsyncClientSession
from gql.transport.aiohttp import AIOHTTPTransport
log = logging.getLogger(__name__)
class Episode(TypedDict):
    """A single episode of a program, as returned by the ruv.is GraphQL API."""

    id: str
    title: str
    # Media location; presumably a stream/download URL -- confirm against the API.
    file: str
class Program(TypedDict):
    """A program (series), as returned by the ruv.is GraphQL API."""

    id: str
    title: str
    # Presumably the original-language title for foreign programs -- confirm.
    foreign_title: str
    short_description: str
    episodes: List[Episode]


# Program listing keyed by program id.
Programs = Dict[str, Program]
class RUVClient:
    """An HTTP client to gather a program list from ruv.is."""

    def __init__(self) -> None:
        self.url = "https://www.ruv.is/gql/"
        transport = AIOHTTPTransport(self.url)
        # Generous timeout: the full program listing is a large response.
        self.client = Client(transport=transport, execute_timeout=30)

    @staticmethod
    async def _query_categories(session: AsyncClientSession) -> List[str]:
        """Return the slugs of all TV categories."""
        query = gql(
            """
            query getCategorys($station: StationSearch!) {
                Category(station: $station) {
                    categories {
                        title
                        slug
                    }
                }
            }
            """
        )
        params = {
            "station": "tv",
        }
        result = await session.execute(query, variable_values=params)
        category_slugs = [category["slug"] for category in result["Category"]["categories"]]  # type: ignore
        return category_slugs

    @staticmethod
    async def _query_category(session: AsyncClientSession, category: str) -> List[Program]:
        """Return every program (with episodes) of one category slug."""
        query = gql(
            """
            query getKrakkaRUVCategories($station: StationSearch!, $category: String!) {
                Category(station: $station, category: $category) {
                    categories {
                        programs {
                            short_description
                            episodes {
                                id
                                title
                                file
                            }
                            title
                            foreign_title
                            short_description
                            id
                        }
                    }
                }
            }
            """
        )
        params = {
            "station": "tv",
            "category": category,
        }
        result = await session.execute(query, variable_values=params)
        # Flatten: each returned category contributes its programs.
        return [
            program for category in result["Category"]["categories"] for program in category["programs"]  # type: ignore
        ]

    async def _get_all_categories(self) -> List[Program]:
        """Query all categories concurrently and flatten their program lists."""
        async with self.client as session:
            categories = await self._query_categories(session)
            list_of_programs_lists = await asyncio.gather(
                *[asyncio.create_task(self._query_category(session, category=category)) for category in categories]
            )
            return [program for program_list in list_of_programs_lists for program in program_list]

    @staticmethod
    async def _query_all_programs(session: AsyncClientSession) -> List[Program]:
        """Return the complete program listing in a single query."""
        query = gql(
            """
            query {
                Programs {
                    short_description
                    episodes {
                        id
                        title
                        file
                    }
                    title
                    foreign_title
                    short_description
                    id
                }
            }
            """
        )
        result = await session.execute(query)
        return [program for program in result["Programs"]]  # type: ignore

    async def _get_all_programs(self) -> Programs:
        """Fetch all programs, then enrich them from the per-category queries."""
        async with self.client as session:
            programs = await self._query_all_programs(session)
            programs_dict = {program["id"]: program for program in programs}
            # The per-category listing carries fields the global listing may
            # lack (see _add_extra_info), so fetch it as well, concurrently.
            categories = await self._query_categories(session)
            list_of_programs_lists = await asyncio.gather(
                *[asyncio.create_task(self._query_category(session, category=category)) for category in categories]
            )
            programs_with_extra_info = {
                program["id"]: program for program_list in list_of_programs_lists for program in program_list
            }
            self._add_extra_info(programs_dict, programs_with_extra_info)
            return programs_dict

    def get_all_programs(self) -> Programs:
        """Synchronous entry point: fetch the full, enriched program listing."""
        return asyncio.run(self._get_all_programs())

    @staticmethod
    def _add_extra_info(programs: Programs, programs_extra_info: Programs) -> None:
        """Adds extra information from another program list to the first one."""
        for p_id, program in programs.items():
            if p_id in programs_extra_info:
                for key in ["short_description", "foreign_title"]:
                    program[key] = programs_extra_info[program["id"]][key]  # type: ignore
def save_programs(file_path: Path, programs: Programs):
    """Serialize *programs* to *file_path* as JSON.

    The file is opened with an explicit UTF-8 encoding so the cache does
    not depend on the platform's default locale encoding.
    """
    with file_path.open("w", encoding="utf-8") as f:
        json.dump(programs, f)
def load_programs_cache(file_path: Path) -> Programs:
    """Deserialize a previously cached program listing from *file_path*.

    Read explicitly as UTF-8 (matching save_programs) instead of the
    platform's default locale encoding.

    Raises:
        FileNotFoundError: if no cache file exists yet.
    """
    with file_path.open("r", encoding="utf-8") as f:
        return json.load(f)
def load_programs(force_reload, cache: Path) -> Programs:
    """Return the program listing, preferring the on-disk cache.

    A cache hit (unless *force_reload* is set) returns immediately;
    otherwise the listing is fetched from ruv.is and the cache refreshed.
    """
    if not force_reload:
        try:
            return load_programs_cache(cache)
        except FileNotFoundError:
            pass  # no cache yet - fall through to a fresh fetch
    programs = RUVClient().get_all_programs()
    save_programs(cache, programs)
    log.info(
        f"Loaded {len(programs)} programs and {sum([len(program['episodes']) for program in programs.values()])} episodes"
    )
    return programs
| 33.325714 | 122 | 0.558471 | import asyncio
import json
import logging
from pathlib import Path
from typing import Dict, List, TypedDict
from gql import Client, gql
from gql.client import AsyncClientSession
from gql.transport.aiohttp import AIOHTTPTransport
log = logging.getLogger(__name__)
class Episode(TypedDict):
id: str
title: str
file: str
class Program(TypedDict):
id: str
title: str
foreign_title: str
short_description: str
episodes: List[Episode]
Programs = Dict[str, Program]
class RUVClient:
def __init__(self) -> None:
self.url = "https://www.ruv.is/gql/"
transport = AIOHTTPTransport(self.url)
self.client = Client(transport=transport, execute_timeout=30)
@staticmethod
async def _query_categories(session: AsyncClientSession) -> List[str]:
query = gql(
"""
query getCategorys($station: StationSearch!) {
Category(station: $station) {
categories {
title
slug
}
}
}
"""
)
params = {
"station": "tv",
}
result = await session.execute(query, variable_values=params)
category_slugs = [category["slug"] for category in result["Category"]["categories"]]
return category_slugs
@staticmethod
async def _query_category(session: AsyncClientSession, category: str) -> List[Program]:
query = gql(
"""
query getKrakkaRUVCategories($station: StationSearch!, $category: String!) {
Category(station: $station, category: $category) {
categories {
programs {
short_description
episodes {
id
title
file
}
title
foreign_title
short_description
id
}
}
}
}
"""
)
params = {
"station": "tv",
"category": category,
}
result = await session.execute(query, variable_values=params)
return [
program for category in result["Category"]["categories"] for program in category["programs"]
]
async def _get_all_categories(self) -> List[Program]:
async with self.client as session:
categories = await self._query_categories(session)
list_of_programs_lists = await asyncio.gather(
*[asyncio.create_task(self._query_category(session, category=category)) for category in categories]
)
return [program for program_list in list_of_programs_lists for program in program_list]
@staticmethod
async def _query_all_programs(session: AsyncClientSession) -> List[Program]:
query = gql(
"""
query {
Programs {
short_description
episodes {
id
title
file
}
title
foreign_title
short_description
id
}
}
"""
)
result = await session.execute(query)
return [program for program in result["Programs"]]
async def _get_all_programs(self) -> Programs:
async with self.client as session:
programs = await self._query_all_programs(session)
programs_dict = {program["id"]: program for program in programs}
categories = await self._query_categories(session)
list_of_programs_lists = await asyncio.gather(
*[asyncio.create_task(self._query_category(session, category=category)) for category in categories]
)
programs_with_extra_info = {
program["id"]: program for program_list in list_of_programs_lists for program in program_list
}
self._add_extra_info(programs_dict, programs_with_extra_info)
return programs_dict
def get_all_programs(self) -> Programs:
return asyncio.run(self._get_all_programs())
@staticmethod
def _add_extra_info(programs: Programs, programs_extra_info: Programs) -> None:
for p_id, program in programs.items():
if p_id in programs_extra_info:
for key in ["short_description", "foreign_title"]:
program[key] = programs_extra_info[program["id"]][key]
def save_programs(file_path: Path, programs: Programs):
with file_path.open("w") as f:
json.dump(programs, f)
def load_programs_cache(file_path: Path) -> Programs:
with file_path.open("r") as f:
return json.load(f)
def load_programs(force_reload, cache: Path) -> Programs:
if force_reload:
programs = RUVClient().get_all_programs()
else:
try:
return load_programs_cache(cache)
except FileNotFoundError:
programs = RUVClient().get_all_programs()
save_programs(cache, programs)
log.info(
f"Loaded {len(programs)} programs and {sum([len(program['episodes']) for program in programs.values()])} episodes"
)
return programs
| true | true |
f726a329312efb2fda886586854705d5e3adad9f | 6,532 | py | Python | listWrangler_20191216.py | LukeHebert/genelist_overlap | 5275e9b1d8d5dae2a78b76aed42925bdd4914418 | [
"MIT"
] | null | null | null | listWrangler_20191216.py | LukeHebert/genelist_overlap | 5275e9b1d8d5dae2a78b76aed42925bdd4914418 | [
"MIT"
] | null | null | null | listWrangler_20191216.py | LukeHebert/genelist_overlap | 5275e9b1d8d5dae2a78b76aed42925bdd4914418 | [
"MIT"
] | null | null | null | '''
Author: Luke Hebert
Date begun: December 16th, 2019
Description: finds either the intersection, union, or unique items from a set of n lists
especially useful for comparing lists of genes
inputs for unique option need to be .txt files; this could be easily tweaked though
all input and output are forced to upper case; can be easily tweaked
'''
import os, sys
def getContents(paths_list):
    """Read every file in paths_list into a dictionary.

    Keys are the file paths; values are lists of the file's lines with
    trailing newline characters removed and the text upper-cased (gene
    symbols are compared case-insensitively downstream).
    """
    contents_dict = {}
    # 'path' instead of 'file': avoid shadowing the Python 2 builtin.
    for path in paths_list:
        contents_dict[path] = []
        with open(path, 'r') as inFile:
            for line in inFile:
                # Handles both Unix ('\n') and Windows ('\r\n') line endings.
                line = line.strip('\n').strip('\r')
                contents_dict[path].append(line.upper())
    return contents_dict
# Path separator for the host OS ('nt' == Windows).
slash = '\\' if os.name == 'nt' else '/'
# Copy of the command-line arguments (script name + option flag + input paths).
arguments_list = sys.argv[:]
# INTERSECTION OF N LISTS
if "-i" in arguments_list:
    # Keep only the input file paths (drop the script name and the flag).
    inPaths_list = list(arguments_list)
    temp_pathsList = list(inPaths_list)
    for path in temp_pathsList:
        if (path == '-i') or ('.py' in path):
            inPaths_list.remove(path)
    if len(inPaths_list) < 2:
        # Validate BEFORE prompting so the user is not asked for an output
        # file that will never be written (the original prompted first).
        print('\nUser must specify at least two lists in order to find the intersection.')
    else:
        # Show an index for each input so the user can pick where the output goes.
        print('\n')
        for i, path in enumerate(inPaths_list):
            print(str(i) + '\t' + path)
        outFileName = raw_input("\nPlease enter the name (not the path) of the output txt file (include the file suffix):")
        outPath_index = int(raw_input("\nPlease enter index of the file whose path will be used for the output file (an integer):"))
        print("\nYou chose to find the intersection of " + str(len(inPaths_list)) + " lists.")
        contents_dict = getContents(inPaths_list)
        intersection_list = []
        for key, val in contents_dict.iteritems():
            if len(intersection_list) == 0:
                # The first list seeds the running intersection.
                intersection_list = list(val)
            else:
                # Keep only items present in both the current list and the
                # running intersection.
                temp_list = [item for item in val if item in intersection_list]
                intersection_list = list(temp_list)
        # The output file is placed next to the file the user selected by index.
        completeOutPath = slash.join(inPaths_list[outPath_index].split(slash)[:-1] + [outFileName])
        # Write intersection_list to the output file as a single column of data.
        with open(completeOutPath, 'w') as outFile:
            for item in intersection_list:
                outFile.write(item + '\n')
#UNION OF N LISTS
elif "-n" in arguments_list:
#remove python program and -n arguments to make a pathway list
inPaths_list = list(arguments_list)
temp_pathsList = list(inPaths_list)
for path in temp_pathsList:
if (path == '-n') or ('.py' in path):
inPaths_list.remove(path)
#print out the pathway indexes so that user can select one as the output pathway directory
print('\n')
for i, path in enumerate(inPaths_list):
print(str(i) + '\t' + path)
#ask user to select output file name and directory
outFileName = raw_input("\nPlease enter the name (not the path) of the output txt file (include the file suffix):")
outPath_index = int(raw_input("\nPlease enter index of the file whose path will be used for the output file (an integer):"))
if len(inPaths_list) < 2: #user must specify at least two input files for this option
print('\nUser must specify at least two lists in order to find the union.')
else:
print("\nYou chose to find the union of " + str(len(inPaths_list)) + " lists.")
contents_dict = getContents(inPaths_list) #read the input files into a dictionary
union_list = [] #will fill this with intersection data only
for key, val in contents_dict.iteritems(): #for each input file's list data
if len(union_list) == 0: #if this is the first file's data evaluated, just copy it to output list
union_list = list(val)
else: #the hearth of the algorithm
temp_list = union_list + val #update union list with current file's data/list
union_list = list(set(temp_list)) #remove any duplicates
completeOutPath = slash.join(inPaths_list[outPath_index].split(slash)[:-1] + [outFileName]) #not the most readable, but this is the output path/name
#write union_list to the output file as a single column of data
with open(completeOutPath, 'w') as outFile:
for item in union_list:
outFile.write(item + '\n')
#ITEMS UNIQUE TO EACH OF N LISTS
elif "-o" in arguments_list:
inPaths_list = list(arguments_list)
#remove python program file and selection arguments from arguments list
temp_pathsList = list(inPaths_list)
for path in temp_pathsList:
if (path == '-o') or ('.py' in path):
inPaths_list.remove(path)
if len(inPaths_list) < 2: #user must specify at least two input files for this option
print('\nUser must specify at least two lists in order to find the uniques.')
else:
print("\nYou chose to find the unnique values from " + str(len(inPaths_list)) + " lists.")
contents_dict = getContents(inPaths_list) #read the input files into a dictionary
union_list = [] #will fill this with intersection data only
for key, val in contents_dict.iteritems(): #for each input file's list data
unique_list = list(val)
temp_dict = contents_dict.copy()
del temp_dict[key] #we want to check current list against all other lists, but not itself
for key2, val2 in temp_dict.iteritems(): #go through all the lists except the current list of interest
unique_list = [item for item in unique_list if item not in val2] #keep only those that are unique to unique_list
outFilePath = key.replace(".txt", "_uniques.txt")
with open(outFilePath, 'w') as outFile:
for item in unique_list:
outFile.write(item + '\n')
#SET OF ONE LIST
elif "-s" in arguments_list:
print('\nYou have chosen to take the set of a single list.')
inPath = ''
for argument in arguments_list:
if ('.py' not in argument) and ('-s' not in argument):
inPath = str(argument) #deep copy
outList = []
with open(inPath, 'r') as inFile:
for line in inFile:
outList.append(line.strip('\n'))
outSet = set(outList)
outPath = inPath.replace(".txt", "_set.txt")
with open(outPath, 'w') as outFile:
for item in outSet:
outFile.write(item.upper() + '\n') | 48.746269 | 150 | 0.729026 |
import os, sys
def getContents(paths_list):
contents_dict = {}
for file in paths_list:
contents_dict[file] = []
with open(file, 'r') as inFile:
for line in inFile:
line = line.strip('\n').strip('\r')
contents_dict[file].append(line.upper())
return contents_dict
slash = '\\' if os.name == 'nt' else '/'
arguments_list = sys.argv[:]
if "-i" in arguments_list:
inPaths_list = list(arguments_list)
temp_pathsList = list(inPaths_list)
for path in temp_pathsList:
if (path == '-i') or ('.py' in path):
inPaths_list.remove(path)
print('\n')
for i, path in enumerate(inPaths_list):
print(str(i) + '\t' + path)
outFileName = raw_input("\nPlease enter the name (not the path) of the output txt file (include the file suffix):")
outPath_index = int(raw_input("\nPlease enter index of the file whose path will be used for the output file (an integer):"))
if len(inPaths_list) < 2:
print('\nUser must specify at least two lists in order to find the intersection.')
else:
print("\nYou chose to find the intersection of " + str(len(inPaths_list)) + " lists.")
contents_dict = getContents(inPaths_list)
intersection_list = []
for key, val in contents_dict.iteritems():
if len(intersection_list) == 0: #if this is the first file's data evaluated, just copy it to output list
intersection_list = list(val)
else:
temp_list = [item for item in val if item in intersection_list]
intersection_list = list(temp_list)
completeOutPath = slash.join(inPaths_list[outPath_index].split(slash)[:-1] + [outFileName])
with open(completeOutPath, 'w') as outFile:
for item in intersection_list:
outFile.write(item + '\n')
elif "-n" in arguments_list:
inPaths_list = list(arguments_list)
temp_pathsList = list(inPaths_list)
for path in temp_pathsList:
if (path == '-n') or ('.py' in path):
inPaths_list.remove(path)
print('\n')
for i, path in enumerate(inPaths_list):
print(str(i) + '\t' + path)
outFileName = raw_input("\nPlease enter the name (not the path) of the output txt file (include the file suffix):")
outPath_index = int(raw_input("\nPlease enter index of the file whose path will be used for the output file (an integer):"))
if len(inPaths_list) < 2:
print('\nUser must specify at least two lists in order to find the union.')
else:
print("\nYou chose to find the union of " + str(len(inPaths_list)) + " lists.")
contents_dict = getContents(inPaths_list)
union_list = []
for key, val in contents_dict.iteritems():
if len(union_list) == 0: #if this is the first file's data evaluated, just copy it to output list
union_list = list(val)
else:
temp_list = union_list + val
union_list = list(set(temp_list)) #remove any duplicates
completeOutPath = slash.join(inPaths_list[outPath_index].split(slash)[:-1] + [outFileName]) #not the most readable, but this is the output path/name
#write union_list to the output file as a single column of data
with open(completeOutPath, 'w') as outFile:
for item in union_list:
outFile.write(item + '\n')
#ITEMS UNIQUE TO EACH OF N LISTS
elif "-o" in arguments_list:
inPaths_list = list(arguments_list)
#remove python program file and selection arguments from arguments list
temp_pathsList = list(inPaths_list)
for path in temp_pathsList:
if (path == '-o') or ('.py' in path):
inPaths_list.remove(path)
if len(inPaths_list) < 2: #user must specify at least two input files for this option
print('\nUser must specify at least two lists in order to find the uniques.')
else:
print("\nYou chose to find the unnique values from " + str(len(inPaths_list)) + " lists.")
contents_dict = getContents(inPaths_list) #read the input files into a dictionary
union_list = [] #will fill this with intersection data only
for key, val in contents_dict.iteritems(): #for each input file's list data
unique_list = list(val)
temp_dict = contents_dict.copy()
del temp_dict[key]
for key2, val2 in temp_dict.iteritems():
unique_list = [item for item in unique_list if item not in val2]
outFilePath = key.replace(".txt", "_uniques.txt")
with open(outFilePath, 'w') as outFile:
for item in unique_list:
outFile.write(item + '\n')
elif "-s" in arguments_list:
print('\nYou have chosen to take the set of a single list.')
inPath = ''
for argument in arguments_list:
if ('.py' not in argument) and ('-s' not in argument):
inPath = str(argument)
outList = []
with open(inPath, 'r') as inFile:
for line in inFile:
outList.append(line.strip('\n'))
outSet = set(outList)
outPath = inPath.replace(".txt", "_set.txt")
with open(outPath, 'w') as outFile:
for item in outSet:
outFile.write(item.upper() + '\n') | true | true |
f726a386b1ac9288db05e33cf07f7a65824e7d28 | 1,805 | py | Python | dataset_utils/check_bg_layer.py | ArthurWish/mmdetection | bd4c5b04e9d880f7a38131f17d3b43e4a3630c4f | [
"Apache-2.0"
] | null | null | null | dataset_utils/check_bg_layer.py | ArthurWish/mmdetection | bd4c5b04e9d880f7a38131f17d3b43e4a3630c4f | [
"Apache-2.0"
] | null | null | null | dataset_utils/check_bg_layer.py | ArthurWish/mmdetection | bd4c5b04e9d880f7a38131f17d3b43e4a3630c4f | [
"Apache-2.0"
] | null | null | null | import os
from PIL import Image, ImageDraw
from tqdm import tqdm
def label_bg_layer(img_path, label_path, img_type):
    """Draw YOLO-format boxes on background images and save labeled copies.

    For every directory in *img_path* whose (prefix-stripped) name has a
    matching '<name>.txt' in *label_path*, the boxes are drawn on the
    '<img_type>' image inside that directory and the result is saved next
    to it with a '-labeled' suffix.
    """
    bg_data_list = os.listdir(img_path)
    # Label file names without their '.txt' extension.
    label_prefix_list = [os.path.splitext(name)[0] for name in os.listdir(label_path)]
    # find backgound label
    for bg_data in tqdm(bg_data_list):
        # Image directory names carry a 4-character prefix in front of the
        # label name -- strip it to match.  TODO confirm the prefix length.
        bg_data_withoutnew = bg_data[4:]
        if bg_data_withoutnew not in label_prefix_list:
            continue
        single_label_path = os.path.join(label_path, bg_data_withoutnew + '.txt')
        single_img_path = os.path.join(img_path, bg_data, img_type)
        # YOLO label line: "<class> <x_center> <y_center> <width> <height>",
        # all coordinates normalized to [0, 1].
        with open(single_label_path, 'r') as label_fp:
            boxes = [
                (float(x.split(" ")[1]), float(x.split(" ")[2]),
                 float(x.split(" ")[3]), float(x.split(" ")[4]),)
                for x in label_fp.readlines()
            ]
        image = Image.open(single_img_path)
        # BUG FIX: PIL's Image.size is (width, height); the original code
        # unpacked it as 'h, w', mis-scaling boxes on non-square images.
        w, h = image.size
        image_draw = ImageDraw.Draw(image)
        for box in boxes:
            # Convert normalized center/size to absolute corner coordinates.
            image_draw.rectangle(
                [(box[0] - box[2] / 2) * w, (box[1] - box[3] / 2) * h,
                 (box[0] + box[2] / 2) * w, (box[1] + box[3] / 2) * h],
                fill=None, outline='red', width=3
            )
        # save labeled image
        image.save(os.path.splitext(single_img_path)[
            0] + '-labeled' + os.path.splitext(single_img_path)[1])
if __name__ == '__main__':
    # Example invocation with the repository's default dataset layout.
    label_bg_layer('my-dataset/merge_background_images',
                   'my-dataset/labels',
                   img_type='filled.png')
| 41.022727 | 117 | 0.532964 | import os
from PIL import Image, ImageDraw
from tqdm import tqdm
def label_bg_layer(img_path, label_path, img_type):
bg_data_list = os.listdir(img_path)
label_list = os.listdir(label_path)
label_prefix_list = []
for label in label_list:
label = os.path.splitext(label)[0]
label_prefix_list.append(label)
for bg_data in tqdm(bg_data_list):
bg_data_withoutnew = bg_data[4:]
if bg_data_withoutnew in label_prefix_list:
single_label_path = os.path.join(label_path, bg_data_withoutnew + '.txt')
single_img_path = os.path.join(img_path, bg_data, img_type)
with open(single_label_path, 'r') as label_fp:
label_list = [
(float(x.split(" ")[1]), float(x.split(" ")[2]),
float(x.split(" ")[3]), float(x.split(" ")[4]),)
for x in label_fp.readlines()
]
image = Image.open(single_img_path)
h, w = image.size
image_draw = ImageDraw.Draw(image)
for label in label_list:
image_draw.rectangle(
[(label[0] - label[2] / 2) * w, (label[1] - label[3] / 2) * h, (label[0] + label[2] / 2) * w,
(label[1] + label[3] / 2) * h],
fill=None, outline='red', width=3
)
image.save(os.path.splitext(single_img_path)[
0] + '-labeled' + os.path.splitext(single_img_path)[1])
if __name__ == '__main__':
label_bg_layer('my-dataset/merge_background_images',
'my-dataset/labels',
img_type='filled.png')
| true | true |
f726a3f0bc85a62b7c621cc217d1f66bc6ba1017 | 4,272 | py | Python | etl/deaths.py | icane/demographic-indicators | b1c394a4497e8e4c0189bf4c0518ce38fb873d4c | [
"Apache-2.0"
] | null | null | null | etl/deaths.py | icane/demographic-indicators | b1c394a4497e8e4c0189bf4c0518ce38fb873d4c | [
"Apache-2.0"
] | 1 | 2022-01-18T11:01:29.000Z | 2022-01-18T11:01:29.000Z | etl/deaths.py | icane/demographic-indicators | b1c394a4497e8e4c0189bf4c0518ce38fb873d4c | [
"Apache-2.0"
] | null | null | null | """Deaths indicators."""
from etl.common import to_json_stat, write_to_file
from etl.config_deaths import deaths_cfg as cfg
from etlstat.extractor.extractor import xlsx
import json
import pandas as pd
def transform(df, periods, prefix=''):
    """Build a period column and keep the last *periods* rows.

    The 'Año' (year) and 'Mes' (month) columns are merged into a single
    'Mes' column of the form '<prefix><year>-<zero-padded month>', the
    frame is trimmed to its trailing *periods* rows, and every numeric
    value is rounded to two decimals.

    Args:
        df (pandas.DataFrame): dataset with 'Año' and 'Mes' columns.
        periods (int): number of trailing time periods to keep.
        prefix (str): optional prefix for the generated period labels.

    Returns:
        pandas.DataFrame: the transformed tail of *df*.
    """
    # Vectorized instead of the original 'df.loc[i]' row loop: the loop
    # assumed a gapless RangeIndex and would raise KeyError after the
    # caller's dropna() removed rows.  str.zfill(2) matches '{:0>2}'.
    df['period'] = (prefix + df['Año'].astype(str) + '-'
                    + df['Mes'].astype(str).str.zfill(2))
    df.drop(columns={'Año', 'Mes'}, axis=1, inplace=True)
    df.rename(columns={'period': 'Mes'}, inplace=True)
    df = df.tail(periods)
    df = df.round(2)
    return df
def replace_month(json_str):
    """Replace zero-padded month numbers ('-01"' ... '-12"') by their
    Spanish abbreviations ('-Ene"' ... '-Dic"')."""
    month_names = ('Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun',
                   'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic')
    # Table-driven rather than twelve chained str.replace calls; the
    # patterns are disjoint, so the order of replacement is irrelevant.
    for number, name in enumerate(month_names, start=1):
        json_str = json_str.replace('-{:02d}"'.format(number), '-{}"'.format(name))
    return json_str
# Read input files
data = xlsx(cfg.path.input)

# Datasets
df_global = pd.DataFrame()
indicators = []

for key in cfg.series:
    print(key)
    # Columns to extract: period columns plus the Cantabria series (and the
    # Spain series when the config defines a second variable).
    variables = [
        'Año', 'Mes',
        cfg.series[key].variables[0],
        cfg.series[key].moving_avg[0]]
    if (len(cfg.series[key].variables) == 2):
        variables.append(cfg.series[key].variables[1])
        variables.append(cfg.series[key].moving_avg[1])
    df = data[cfg.file]\
        [cfg.series[key].sheet][variables].copy()
    # Drop NA rows, if any
    df.dropna(axis=0, how='all', inplace=True)
    # Rename variables
    df.rename(
        columns={
            cfg.series[key].variables[0]: 'Cantabria',
            cfg.series[key].moving_avg[0]: 'Cantabria_MM'},
        inplace=True)
    if (len(cfg.series[key].variables) == 2):
        df.rename(
            columns={
                cfg.series[key].variables[1]: 'España',
                cfg.series[key].moving_avg[1]: 'España_MM'},
            inplace=True)
    # Remove .0 from Año and Mes (raw strings: '\.0' was an invalid escape).
    df['Año'] = df['Año'].astype(str).replace(r'\.0', '', regex=True)
    df['Mes'] = df['Mes'].astype(str).replace(r'\.0', '', regex=True)
    # Merge global dataset: one transposed row per indicator for the CSV.
    df_cant = df[['Año', 'Mes', 'Cantabria']].copy()
    df_cant = transform(df_cant, cfg.periods.global_deaths, 'Cantabria - ')
    df_cant.set_index('Mes', inplace=True)
    df_cant = df_cant.transpose()
    df_cant.insert(0, 'Categoria', cfg.series[key].category)
    df_cant[' - Indicadores'] = cfg.series[key].label
    if (len(cfg.series[key].variables) == 2):
        df_esp = df[['Año', 'Mes', 'España']].copy()
        df_esp = transform(df_esp, cfg.periods.global_deaths, 'España - ')
        df_esp.set_index('Mes', inplace=True)
        df_esp = df_esp.transpose()
        df_esp[' - Indicadores'] = cfg.series[key].label
        df_cant = df_cant.merge(df_esp, on=' - Indicadores')
    indicators.append(df_cant)
    # Generate JSON-Stat dataset
    df = transform(df, cfg.periods.deaths)
    # 'json_vars' instead of 'vars': avoid shadowing the builtin vars().
    json_vars = ['Cantabria', 'Cantabria_MM']
    if (len(cfg.series[key].variables) == 2):
        json_vars.append('España')
        json_vars.append('España_MM')
    json_file = to_json_stat(
        df,
        ['Mes'],
        json_vars,
        cfg.series[key].source)
    json_obj = json.loads(json_file)
    json_obj['dimension']['Variables']['category']['unit'] = \
        cfg.series[key].unit
    json_obj['note'] = cfg.series[key].note
    json_file = json.dumps(json_obj)
    json_file = replace_month(json_file)
    write_to_file(json_file, cfg.path.output + cfg.series[key].json)

# Generate CSV global dataset
df_global = pd.concat(indicators, axis=0, verify_integrity=False)
df_global.to_csv(cfg.path.output + cfg.globals.csv, index=False)
print('\nEnd of process. Files generated successfully.')
| 33.637795 | 75 | 0.612125 |
from etl.common import to_json_stat, write_to_file
from etl.config_deaths import deaths_cfg as cfg
from etlstat.extractor.extractor import xlsx
import json
import pandas as pd
def transform(df, periods, prefix=''):
for i in range(0, len(df)):
period1 = str(df.loc[i, 'Año'])
period2 = '{:0>2}'.format(df.loc[i, 'Mes'])
df.loc[i, 'period'] = prefix + period1 + '-' + period2
df.drop(columns={'Año', 'Mes'}, axis=1, inplace=True)
df.rename(columns={'period': 'Mes'}, inplace=True)
df = df.tail(periods)
df = df.round(2)
return df
def replace_month(json_str):
json_str = json_str.replace('-01"', '-Ene"')
json_str = json_str.replace('-02"', '-Feb"')
json_str = json_str.replace('-03"', '-Mar"')
json_str = json_str.replace('-04"', '-Abr"')
json_str = json_str.replace('-05"', '-May"')
json_str = json_str.replace('-06"', '-Jun"')
json_str = json_str.replace('-07"', '-Jul"')
json_str = json_str.replace('-08"', '-Ago"')
json_str = json_str.replace('-09"', '-Sep"')
json_str = json_str.replace('-10"', '-Oct"')
json_str = json_str.replace('-11"', '-Nov"')
json_str = json_str.replace('-12"', '-Dic"')
return json_str
data = xlsx(cfg.path.input)
df_global = pd.DataFrame()
indicators = []
for key in cfg.series:
print(key)
variables = [
'Año', 'Mes',
cfg.series[key].variables[0],
cfg.series[key].moving_avg[0]]
if (len(cfg.series[key].variables) == 2):
variables.append(cfg.series[key].variables[1])
variables.append(cfg.series[key].moving_avg[1])
df = data[cfg.file]\
[cfg.series[key].sheet][variables].copy()
df.dropna(axis=0, how='all', inplace=True)
df.rename(
columns={
cfg.series[key].variables[0]: 'Cantabria',
cfg.series[key].moving_avg[0]: 'Cantabria_MM'},
inplace=True)
if (len(cfg.series[key].variables) == 2):
df.rename(
columns={
cfg.series[key].variables[1]: 'España',
cfg.series[key].moving_avg[1]: 'España_MM'},
inplace=True)
df['Año'] = df['Año'].astype(str).replace('\.0', '', regex=True)
df['Mes'] = df['Mes'].astype(str).replace('\.0', '', regex=True)
df_cant = df[['Año', 'Mes', 'Cantabria']].copy()
df_cant = transform(df_cant, cfg.periods.global_deaths, 'Cantabria - ')
df_cant.set_index('Mes', inplace=True)
df_cant = df_cant.transpose()
df_cant.insert(0, 'Categoria', cfg.series[key].category)
df_cant[' - Indicadores'] = cfg.series[key].label
if (len(cfg.series[key].variables) == 2):
df_esp = df[['Año', 'Mes', 'España']].copy()
df_esp = transform(df_esp, cfg.periods.global_deaths, 'España - ')
df_esp.set_index('Mes', inplace=True)
df_esp = df_esp.transpose()
df_esp[' - Indicadores'] = cfg.series[key].label
df_cant = df_cant.merge(df_esp, on=' - Indicadores')
indicators.append(df_cant)
df = transform(df, cfg.periods.deaths)
vars = ['Cantabria', 'Cantabria_MM']
if (len(cfg.series[key].variables) == 2):
vars.append('España')
vars.append('España_MM')
json_file = to_json_stat(
df,
['Mes'],
vars,
cfg.series[key].source)
json_obj = json.loads(json_file)
json_obj['dimension']['Variables']['category']['unit'] = \
cfg.series[key].unit
json_obj['note'] = cfg.series[key].note
json_file = json.dumps(json_obj)
json_file = replace_month(json_file)
write_to_file(json_file, cfg.path.output + cfg.series[key].json)
df_global = pd.concat(indicators, axis=0, verify_integrity=False)
df_global.to_csv(cfg.path.output + cfg.globals.csv, index=False)
print('\nEnd of process. Files generated successfully.')
| true | true |
f726a514c5af450b08e924325a355027db5b2bb3 | 2,355 | py | Python | tests/test_payment_chargebacks.py | elcolumbio/mollie-api-python | 743c7c10df5916bfa14e2c4e82ad5cca17bc2ae3 | [
"BSD-2-Clause"
] | null | null | null | tests/test_payment_chargebacks.py | elcolumbio/mollie-api-python | 743c7c10df5916bfa14e2c4e82ad5cca17bc2ae3 | [
"BSD-2-Clause"
] | 3 | 2018-09-21T12:02:44.000Z | 2018-09-26T12:01:00.000Z | tests/test_payment_chargebacks.py | elcolumbio/mollie-api-python | 743c7c10df5916bfa14e2c4e82ad5cca17bc2ae3 | [
"BSD-2-Clause"
] | null | null | null | from mollie.api.objects.chargeback import Chargeback
from .utils import assert_list_object
PAYMENT_ID = 'tr_7UhSN1zuXS'
CHARGEBACK_ID = 'chb_n9z0tp'
def test_get_payment_chargebacks_by_payment_id(client, response):
    """Listing chargebacks through a parent payment id yields Chargeback objects."""
    url = 'https://api.mollie.com/v2/payments/%s/chargebacks' % PAYMENT_ID
    response.get(url, 'chargebacks_list')
    result = client.payment_chargebacks.with_parent_id(PAYMENT_ID).list()
    assert_list_object(result, Chargeback)
def test_get_single_payment_chargeback(client, response):
    """Fetching one chargeback by id via a parent payment id populates all fields."""
    url = 'https://api.mollie.com/v2/payments/%s/chargebacks/%s' % (PAYMENT_ID, CHARGEBACK_ID)
    response.get(url, 'chargeback_single')
    result = client.payment_chargebacks.with_parent_id(PAYMENT_ID).get(CHARGEBACK_ID)
    assert isinstance(result, Chargeback)
    assert result.id == CHARGEBACK_ID
    assert result.amount == {'currency': 'USD', 'value': '43.38'}
    assert result.settlement_amount == {'currency': 'EUR', 'value': '-35.07'}
    assert result.created_at == '2018-03-14T17:00:52.0Z'
    assert result.reversed_at == '2018-03-14T17:00:55.0Z'
    assert result.payment_id == PAYMENT_ID
def test_list_payment_chargebacks_by_payment_object(client, response):
    """Listing chargebacks through a fetched Payment object yields Chargeback objects."""
    chargebacks_url = 'https://api.mollie.com/v2/payments/%s/chargebacks' % PAYMENT_ID
    payment_url = 'https://api.mollie.com/v2/payments/%s' % PAYMENT_ID
    response.get(chargebacks_url, 'chargebacks_list')
    response.get(payment_url, 'payment_single')
    parent = client.payments.get(PAYMENT_ID)
    result = client.payment_chargebacks.on(parent).list()
    assert_list_object(result, Chargeback)
def test_get_single_payment_chargeback_by_payment_object(client, response):
    """Fetching one chargeback through a fetched Payment object links back to it."""
    chargeback_url = 'https://api.mollie.com/v2/payments/%s/chargebacks/%s' % (PAYMENT_ID, CHARGEBACK_ID)
    payment_url = 'https://api.mollie.com/v2/payments/%s' % PAYMENT_ID
    response.get(chargeback_url, 'chargeback_single')
    response.get(payment_url, 'payment_single')
    parent = client.payments.get(PAYMENT_ID)
    result = client.payment_chargebacks.on(parent).get(CHARGEBACK_ID)
    assert isinstance(result, Chargeback)
    assert result.payment_id == PAYMENT_ID
| 45.288462 | 102 | 0.745648 | from mollie.api.objects.chargeback import Chargeback
from .utils import assert_list_object
PAYMENT_ID = 'tr_7UhSN1zuXS'
CHARGEBACK_ID = 'chb_n9z0tp'
def test_get_payment_chargebacks_by_payment_id(client, response):
response.get('https://api.mollie.com/v2/payments/%s/chargebacks' % PAYMENT_ID, 'chargebacks_list')
chargebacks = client.payment_chargebacks.with_parent_id(PAYMENT_ID).list()
assert_list_object(chargebacks, Chargeback)
def test_get_single_payment_chargeback(client, response):
response.get('https://api.mollie.com/v2/payments/%s/chargebacks/%s' % (PAYMENT_ID, CHARGEBACK_ID),
'chargeback_single')
chargeback = client.payment_chargebacks.with_parent_id(PAYMENT_ID).get(CHARGEBACK_ID)
assert isinstance(chargeback, Chargeback)
assert chargeback.id == CHARGEBACK_ID
assert chargeback.amount == {'currency': 'USD', 'value': '43.38'}
assert chargeback.settlement_amount == {'currency': 'EUR', 'value': '-35.07'}
assert chargeback.created_at == '2018-03-14T17:00:52.0Z'
assert chargeback.reversed_at == '2018-03-14T17:00:55.0Z'
assert chargeback.payment_id == PAYMENT_ID
def test_list_payment_chargebacks_by_payment_object(client, response):
response.get('https://api.mollie.com/v2/payments/%s/chargebacks' % PAYMENT_ID, 'chargebacks_list')
response.get('https://api.mollie.com/v2/payments/%s' % PAYMENT_ID, 'payment_single')
payment = client.payments.get(PAYMENT_ID)
chargebacks = client.payment_chargebacks.on(payment).list()
assert_list_object(chargebacks, Chargeback)
def test_get_single_payment_chargeback_by_payment_object(client, response):
response.get('https://api.mollie.com/v2/payments/%s/chargebacks/%s' % (PAYMENT_ID, CHARGEBACK_ID),
'chargeback_single')
response.get('https://api.mollie.com/v2/payments/%s' % PAYMENT_ID, 'payment_single')
payment = client.payments.get(PAYMENT_ID)
chargeback = client.payment_chargebacks.on(payment).get(CHARGEBACK_ID)
assert isinstance(chargeback, Chargeback)
assert chargeback.payment_id == PAYMENT_ID
| true | true |
f726a8242f8fd6b97a2dbc1d66d1b2ffa30955db | 4,308 | py | Python | probability_combinatorics/linear_regression.py | codecakes/random_games | 1e670021ec97a196726e937e658878dc63ba9d34 | [
"MIT"
] | null | null | null | probability_combinatorics/linear_regression.py | codecakes/random_games | 1e670021ec97a196726e937e658878dc63ba9d34 | [
"MIT"
] | null | null | null | probability_combinatorics/linear_regression.py | codecakes/random_games | 1e670021ec97a196726e937e658878dc63ba9d34 | [
"MIT"
] | null | null | null | from math import sqrt
from itertools import izip
from numpy import mean
from py_variance_std import t_percentile
def calc_slope(r, sdy, sdx): return r * (float(sdy)/sdx)
def line_fitting(x_arr, y_arr):
    """Least-squares straight-line fit y = m*x + c.

    m (for sample data points) = Cov(X, Y) / Cov(X, X)
      = E[(X - E(X))(Y - E(Y))] / E[(X - E(X))^2]
    (equivalently: calc_slope given STD Y, STD X and r).

    Returns the tuple (ybar, m, xbar, c): the y mean, slope, x mean and
    y-intercept.

    Fix: the original paired the arrays with itertools.izip, which only
    exists on Python 2; the builtin zip does the same job on both versions.
    """
    xbar = mean(x_arr)
    ybar = mean(y_arr)
    xsqr_bar = mean([xi ** 2 for xi in x_arr])
    xybar = mean([xi * yi for xi, yi in zip(x_arr, y_arr)])
    # Closed-form least-squares slope: Cov(X, Y) / Var(X).
    m = (xbar * ybar - xybar) / (xbar ** 2 - xsqr_bar)
    # The fitted line passes through (xbar, ybar), which fixes the intercept.
    c = ybar - m * xbar
    return ybar, m, xbar, c
def trace_line(x_arr, y_arr, x_start=0):
    """(x, y) points on the fitted least-squares line at x_start then every x in x_arr."""
    _, slope, _, intercept = line_fitting(x_arr, y_arr)
    xs = [x_start] + list(x_arr)
    return [(xi, slope * xi + intercept) for xi in xs]
def line_error(**params):
    """Squared residuals (yi - (m*xi + c))**2 for every data point.

    The least squares estimates represent the minimum of this sum;
    http://www.pmean.com/10/LeastSquares.html

    Keyword args: x_arr and y_arr are required; m and c are optional and
    are fitted via line_fitting when absent.  Returns None when
    x_arr/y_arr are missing (mirrors the original contract).

    Fix: the original paired the arrays with itertools.izip, which only
    exists on Python 2; the builtin zip works on both versions.
    """
    if 'x_arr' in params and 'y_arr' in params:
        if 'm' in params and 'c' in params:
            m, c = params['m'], params['c']
        else:
            # No line supplied: fall back to a fresh least-squares fit.
            _, m, _, c = line_fitting(params['x_arr'], params['y_arr'])
        # Squared vertical distance between each observed y and the line.
        return [(yi - (m * xi + c)) ** 2
                for yi, xi in zip(params['y_arr'], params['x_arr'])]
def std_error_y_estimate(n, y_line_error_var):
    """Standard error of the y-estimate for the regression line.

    n is the sample size and y_line_error_var is sum(line_error(**params)),
    the total squared residual.  Degrees of freedom are n - 2 because two
    quantities (slope and intercept) were estimated from the data.
    """
    degrees_of_freedom = n - 2
    return sqrt(float(y_line_error_var) / degrees_of_freedom)
def x_line_std(x_arr):
    """Root of the sum of squared deviations of x about its mean (not divided by n)."""
    centre = mean(x_arr)
    total = sum((xi - centre) ** 2 for xi in x_arr)
    return sqrt(total)
def std_error_linear(se_y, x_line_std):
    """Standard error of the fitted slope.

    se_y comes from std_error_y_estimate(); x_line_std is the x spread
    returned by x_line_std().
    """
    slope_se = se_y / x_line_std
    return slope_se
def find_std_err_linear(x_arr, y_arr, n_sample):
    """Standard error of the fitted slope for a sample of size n_sample."""
    # Fit the line, then measure how far the observed y values sit from it.
    _, slope, _, intercept = line_fitting(x_arr, y_arr)
    spread_x = x_line_std(x_arr)
    residuals = line_error(x_arr=x_arr, y_arr=y_arr, m=slope, c=intercept)
    se_y = std_error_y_estimate(n_sample, sum(residuals))
    # SE of the slope is SE of the y-estimate scaled by the x spread.
    return std_error_linear(se_y, spread_x)
def r_squared(x_arr, y_arr):
    """Coefficient of determination R^2 of the least-squares line.

    The fraction of the total variation in y that the fitted line
    explains; a value near 1 means the line fits the scatter closely
    (high R^2 = good model, low R^2 = bad model).
    """
    ybar, slope, _, intercept = line_fitting(x_arr, y_arr)
    # Total variation of y about its mean: sum (yi - ybar)^2.
    total_variation = sum((yi - ybar) ** 2 for yi in y_arr)
    # Share of that variation left unexplained by the fitted line.
    unexplained = float(sum(line_error(x_arr=x_arr, y_arr=y_arr, m=slope, c=intercept)))
    return 1 - unexplained / total_variation
def calc_tscore_from_r(r2, n):
    """t score for testing whether a correlation is due to sampling error.

    r2 is the coefficient of determination and n the sample size; the
    degrees of freedom are n - 2 (two variables under test), so compare
    the result against the critical t at df = n - 2.
    """
    df = n - 2
    return sqrt(r2 * float(df) / (1 - r2))
def calc_p_from_tval_from_r(r, n, one_tailed=0):
    """p-value for the correlation's t score, with df = n - 2."""
    tscore = calc_tscore_from_r(r, n)
    return t_percentile(tscore, n - 2, one_tailed=one_tailed)
def margin_error_linear(tscore, se): return tscore * se
def ci_linear(slope, tscore, se):
    """Confidence interval (low, high) for the slope at the given critical t value."""
    half_width = tscore * se  # same quantity margin_error_linear computes
    return (slope - half_width, slope + half_width)
| 35.311475 | 154 | 0.669452 | from math import sqrt
from itertools import izip
from numpy import mean
from py_variance_std import t_percentile
def calc_slope(r, sdy, sdx): return r * (float(sdy)/sdx)
def line_fitting(x_arr, y_arr):
xbar = mean(x_arr)
ybar = mean(y_arr)
xsqr_bar = mean([i**2 for i in x_arr])
xybar = mean([i*j for i,j in izip(x_arr, y_arr)])
m = (xbar*ybar - xybar)/(xbar**2 - xsqr_bar)
c = ybar - m*xbar
return ybar,m,xbar,c
def trace_line(x_arr, y_arr, x_start = 0):
y, m, x, c = line_fitting(x_arr, y_arr)
return [(i, (m*i)+c) for i in [x_start]+list(x_arr)]
def line_error(**params):
if 'x_arr' in params and 'y_arr' in params:
if ('m' in params and 'c' in params):
m,c = params['m'], params['c']
else:
y, m, x, c = line_fitting(params['x_arr'], params['y_arr'])
return [(yi - ((m*xi)+c))**2 for yi,xi in izip(params['y_arr'], params['x_arr'])]
def std_error_y_estimate(n, y_line_error_var):
return sqrt(float(y_line_error_var)/(n-2))
def x_line_std(x_arr):
xbar = mean(x_arr)
return sqrt(sum([(xi - xbar)**2 for xi in x_arr]))
def std_error_linear(se_y, x_line_std):
return se_y/x_line_std
def find_std_err_linear(x_arr, y_arr, n_sample):
ybar,m,xbar,c = line_fitting(x_arr, y_arr)
se_x = x_line_std(x_arr)
y_line_error = sum(line_error(**dict(x_arr=x_arr, y_arr=y_arr, m=m, c=c)))
se_y = std_error_y_estimate(n_sample, y_line_error)
return std_error_linear(se_y, se_x)
def r_squared(x_arr, y_arr):
y, m, x, c = line_fitting(x_arr, y_arr)
total_var_y = ([(i-y)**2 for i in y_arr])
variation_not_by_line = float(sum(line_error(x_arr=x_arr, y_arr=y_arr, m=m, c=c)))/sum(total_var_y)
return 1 - variation_not_by_line
def calc_tscore_from_r(r2,n):
return sqrt(r2*float(n-2)/(1 - r2))
def calc_p_from_tval_from_r(r,n, one_tailed= 0 ):
return t_percentile(calc_tscore_from_r(r,n), n-2, one_tailed= one_tailed)
def margin_error_linear(tscore, se): return tscore * se
def ci_linear(slope, tscore, se):
margin_error = margin_error_linear(tscore, se)
return (slope - margin_error, slope + margin_error)
| true | true |
f726a835b02eb3b1ea4dadc3134351ab0143ad58 | 1,806 | py | Python | tools/photon_yield.py | LeoRoweBrown/ckvpy | fff27847f5577750ae5860e3fdff81877fa4455a | [
"MIT"
] | null | null | null | tools/photon_yield.py | LeoRoweBrown/ckvpy | fff27847f5577750ae5860e3fdff81877fa4455a | [
"MIT"
] | null | null | null | tools/photon_yield.py | LeoRoweBrown/ckvpy | fff27847f5577750ae5860e3fdff81877fa4455a | [
"MIT"
] | null | null | null | import numpy as np
from scipy.integrate import simps
import scipy.constants as const
def compute(theta_in, f, beta, L, n=None):
    """Photon yield over a path of length L from the Frank-Tamm and Fresnel relations.

    theta_in : angles (rad) over the chosen wavelength range
    f        : matching frequencies (Hz)
    beta     : particle speed as a fraction of c
    L        : path length
    n        : refractive index over the range; when None it is derived
               from the Cherenkov relation cos(theta) = 1/(n*beta)

    Returns the expected number of photons.
    """
    if n is None:
        print("Using Cherenkov angle to derive n instead of d(omega)/dk")
        n = 1/(beta*np.cos(theta_in))
    # Fresnel amplitude coefficients for the s and p polarisations.
    # NOTE(review): the radicals use 1 - n*sin(theta)**2; the textbook
    # Fresnel form squares the whole product, 1 - (n*sin(theta))**2 --
    # confirm whether this is intended before changing it.
    s_coeff = np.absolute(
        (n*np.cos(theta_in) - np.sqrt(1-(n*np.sin(theta_in)**2.)))/ \
        (n*np.cos(theta_in) + np.sqrt(1-(n*np.sin(theta_in)**2.)))
    )
    p_coeff = np.absolute(
        (n*np.sqrt(1-(n*np.sin(theta_in)**2.)) - np.cos(theta_in))/ \
        (n*np.sqrt(1-(n*np.sin(theta_in)**2.)) + np.cos(theta_in))
    )
    # Unpolarised reflectance is the mean of the two; transmission is its complement.
    reflectance = (s_coeff + p_coeff)/2.
    transmission = 1-reflectance
    print("Transmission coeff:", transmission)
    # simps needs evenly spaced abscissae, so resample everything onto a
    # uniform 30-point frequency grid before integrating over f.
    f_grid = np.linspace(np.min(f), np.max(f), num=30)
    theta_grid = np.interp(f_grid, f, theta_in)
    trans_grid = np.interp(f_grid, f, transmission)
    n_photons = \
        L*(const.fine_structure/(const.hbar*const.c))* \
        simps(np.sin(theta_grid)**2.*trans_grid*const.h, x=f_grid)
    print(n_photons, "photons")
    return n_photons
from scipy.integrate import simps
import scipy.constants as const
def compute(theta_in, f, beta, L, n=None):
if n is None:
print("Using Cherenkov angle to derive n instead of d(omega)/dk")
n = 1/(beta*np.cos(theta_in))
r_s = np.absolute(
(n*np.cos(theta_in) - np.sqrt(1-(n*np.sin(theta_in)**2.)))/ \
(n*np.cos(theta_in) + np.sqrt(1-(n*np.sin(theta_in)**2.)))
)
r_p = np.absolute(
(n*np.sqrt(1-(n*np.sin(theta_in)**2.)) - np.cos(theta_in))/ \
(n*np.sqrt(1-(n*np.sin(theta_in)**2.)) + np.cos(theta_in))
)
r_eff =(r_p + r_s)/2.
t_eff = 1-r_eff
print("Transmission coeff:", t_eff)
# theta_in = np.arcsin(n*np.sin(theta))
# n_photons = \
# (const*fine_structure/(const.hbar*const.c**2.))*\
# simps((1-1./(beta**2.*n**2.))*t_eff, x=const.h*f)
# need even spaced intervals -> interpolate
# integral is over f
f_interp = np.linspace(np.min(f), np.max(f), num=30)
theta_interp = np.interp(f_interp, f, theta_in)
t_eff_interp = np.interp(f_interp, f, t_eff)
n_photons = \
L*(const.fine_structure/(const.hbar*const.c))* \
simps(np.sin(theta_interp)**2.*t_eff_interp*const.h, x=f_interp)
print(n_photons, "photons")
return n_photons | true | true |
f726a842f0367a5bce40537953cbd52aa33b1909 | 4,830 | py | Python | model/network.py | andrewschreiber/numpy-saliency | 2e788a1150f6e160f2271cbb4f20747559f243c0 | [
"MIT"
] | 10 | 2019-07-30T02:36:21.000Z | 2020-12-22T06:35:40.000Z | model/network.py | andrewschreiber/numpy-saliency | 2e788a1150f6e160f2271cbb4f20747559f243c0 | [
"MIT"
] | 6 | 2019-08-09T02:17:38.000Z | 2022-03-11T23:56:24.000Z | model/network.py | andrewschreiber/numpy-saliency | 2e788a1150f6e160f2271cbb4f20747559f243c0 | [
"MIT"
] | 2 | 2019-08-03T08:38:26.000Z | 2020-06-29T12:58:47.000Z | import numpy as np
import pickle
from model.loss import cross_entropy
from model.layers import Conv2D, Maxpool2D, Dense, Flatten, ReLu, Softmax
class LeNet5:
    """Implementation of LeNet 5 for MNIST
    http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf

    Layers come from model.layers; each exposes forward(x), backward(dy),
    parameters() and (for weighted layers) load(weights, bias).  Training
    is per-example (stochastic) gradient descent under a cross-entropy
    loss; each layer updates its own parameters inside backward().
    """
    def __init__(self, weights_path=None):
        # weights_path: pickle produced by train(); None keeps the layers'
        # own random initialisation.
        lr = 0.01
        layers = []
        # C1: six 5x5 filters over the 1-channel input (padding 2 preserves 28x28).
        layers.append(Conv2D(n_filter=6, n_channel=1,
                             kernel_size=5, padding=2, stride=1,
                             learning_rate=lr, name='conv1'))
        layers.append(ReLu())
        # S2: 2x2 max-pool halves the spatial size.
        layers.append(Maxpool2D(
            pool_size=2, stride=2, name='maxpool2'))
        # C3: sixteen 5x5 filters, no padding.
        layers.append(Conv2D(n_filter=16, n_channel=6,
                             kernel_size=5, padding=0, stride=1,
                             learning_rate=lr, name='conv3'))
        layers.append(ReLu())
        # S4: second 2x2 max-pool.
        layers.append(Maxpool2D(
            pool_size=2, stride=2, name='maxpool4'))
        # C5: 120 5x5 filters collapse the feature map to a 120-vector.
        layers.append(Conv2D(n_filter=120, n_channel=16,
                             kernel_size=5, padding=0, stride=1,
                             learning_rate=lr, name='conv5'))
        layers.append(ReLu())
        layers.append(Flatten())
        # F6: fully connected 120 -> 84.
        layers.append(Dense(
            num_inputs=120, num_outputs=84, learning_rate=lr, name='dense6'))
        layers.append(ReLu())
        # Output layer: 84 -> 10 class scores, squashed by softmax.
        layers.append(Dense(
            num_inputs=84, num_outputs=10, learning_rate=lr, name='dense7'))
        layers.append(Softmax())
        self.layers = layers
        if weights_path is not None:
            self._load(weights_path)
    def _load(self, weights_path):
        """Restore weights and biases from a pickle written by train().

        The pickle holds one parameters() dict per layer in stack order,
        keyed '<name>.weights' / '<name>.bias'; activation, pool and
        flatten entries carry no parameters and are skipped here.
        """
        with open(weights_path, 'rb') as handle:
            b = pickle.load(handle)
        self.layers[0].load(b[0]['conv1.weights'], b[0]['conv1.bias'])
        self.layers[3].load(b[3]['conv3.weights'], b[3]['conv3.bias'])
        self.layers[6].load(b[6]['conv5.weights'], b[6]['conv5.bias'])
        self.layers[9].load(b[9]['dense6.weights'], b[9]['dense6.bias'])
        self.layers[11].load(b[11]['dense7.weights'], b[11]['dense7.bias'])
    def train(self, training_data, training_labels, batch_size, epochs,
              weights_path):
        """Train for `epochs` passes, printing per-batch stats and saving
        all layer parameters to weights_path after every epoch.

        Labels are assumed one-hot (argmax is used for accuracy).
        Batching here only groups the progress reporting; the network is
        still updated once per example.
        """
        print("Training LeNet...")
        total_acc = 0
        for epoch in range(epochs):
            # batch training data
            for batch_index in range(0, training_data.shape[0], batch_size):
                loss = 0
                acc = 0
                data = training_data[batch_index:batch_index+batch_size]
                labels = training_labels[batch_index:batch_index+batch_size]
                # iterate over batch
                for b in range(len(data)):
                    x = data[b]
                    y = labels[b]
                    # forward pass
                    output = self.forward(x)
                    if np.argmax(output) == np.argmax(y):
                        acc += 1
                        total_acc += 1
                    loss += cross_entropy(output, y)
                    # backward pass
                    # update network on each datapoint for simplicity
                    # NOTE(review): the chain is seeded with the one-hot
                    # label itself; presumably Softmax.backward combines it
                    # with the cached output -- confirm in model.layers.
                    dy = y
                    for l in range(len(self.layers)-1, -1, -1):
                        dout = self.layers[l].backward(dy)
                        dy = dout
                # print performance
                loss /= len(data)
                batch_acc = float(acc)/float(len(data))
                # Running accuracy over every example seen so far this run.
                train_acc = float(total_acc) / \
                    float((batch_index+len(data)+epoch*len(training_data)))
                print(('| Epoch: {0:d}/{1:d} | Iter:{2:d} | Loss: {3:.2f} | ' +
                       'BatchAcc: {4:.2f} | TrainAcc: {5:.2f} |')
                      .format(epoch+1, epochs, batch_index+len(data),
                              loss, batch_acc, train_acc))
            # save parameters after each epoch
            print("Saving model to", weights_path)
            layers = [layer.parameters() for layer in self.layers]
            with open(weights_path, 'wb') as handle:
                pickle.dump(layers, handle, protocol=pickle.HIGHEST_PROTOCOL)
    def forward(self, x):
        """Run x through the whole layer stack and return the softmax output."""
        for l in range(len(self.layers)):
            output = self.layers[l].forward(x)
            x = output
        return output
    def predict(self, x):
        """Return (predicted digit, its softmax probability) for one input."""
        output = self.forward(x)
        digit = np.argmax(output)
        # Output is indexed [0, digit]; presumably shape (1, 10) -- confirm.
        probability = output[0, digit]
        return digit, probability
    def test(self, data, labels):
        """Evaluate accuracy over a labelled set and print the summary."""
        print("Testing LeNet...")
        total_acc = 0
        test_size = len(data)
        for i in range(test_size):
            x = data[i]
            y = labels[i]
            if np.argmax(self.forward(x)) == np.argmax(y):
                total_acc += 1
        print("== Correct: {}/{}. Accuracy: {} =="
              .format(total_acc, test_size, total_acc/test_size))
| 38.951613 | 79 | 0.522774 | import numpy as np
import pickle
from model.loss import cross_entropy
from model.layers import Conv2D, Maxpool2D, Dense, Flatten, ReLu, Softmax
class LeNet5:
def __init__(self, weights_path=None):
lr = 0.01
layers = []
layers.append(Conv2D(n_filter=6, n_channel=1,
kernel_size=5, padding=2, stride=1,
learning_rate=lr, name='conv1'))
layers.append(ReLu())
layers.append(Maxpool2D(
pool_size=2, stride=2, name='maxpool2'))
layers.append(Conv2D(n_filter=16, n_channel=6,
kernel_size=5, padding=0, stride=1,
learning_rate=lr, name='conv3'))
layers.append(ReLu())
layers.append(Maxpool2D(
pool_size=2, stride=2, name='maxpool4'))
layers.append(Conv2D(n_filter=120, n_channel=16,
kernel_size=5, padding=0, stride=1,
learning_rate=lr, name='conv5'))
layers.append(ReLu())
layers.append(Flatten())
layers.append(Dense(
num_inputs=120, num_outputs=84, learning_rate=lr, name='dense6'))
layers.append(ReLu())
layers.append(Dense(
num_inputs=84, num_outputs=10, learning_rate=lr, name='dense7'))
layers.append(Softmax())
self.layers = layers
if weights_path is not None:
self._load(weights_path)
def _load(self, weights_path):
with open(weights_path, 'rb') as handle:
b = pickle.load(handle)
self.layers[0].load(b[0]['conv1.weights'], b[0]['conv1.bias'])
self.layers[3].load(b[3]['conv3.weights'], b[3]['conv3.bias'])
self.layers[6].load(b[6]['conv5.weights'], b[6]['conv5.bias'])
self.layers[9].load(b[9]['dense6.weights'], b[9]['dense6.bias'])
self.layers[11].load(b[11]['dense7.weights'], b[11]['dense7.bias'])
def train(self, training_data, training_labels, batch_size, epochs,
weights_path):
print("Training LeNet...")
total_acc = 0
for epoch in range(epochs):
for batch_index in range(0, training_data.shape[0], batch_size):
loss = 0
acc = 0
data = training_data[batch_index:batch_index+batch_size]
labels = training_labels[batch_index:batch_index+batch_size]
for b in range(len(data)):
x = data[b]
y = labels[b]
output = self.forward(x)
if np.argmax(output) == np.argmax(y):
acc += 1
total_acc += 1
loss += cross_entropy(output, y)
dy = y
for l in range(len(self.layers)-1, -1, -1):
dout = self.layers[l].backward(dy)
dy = dout
loss /= len(data)
batch_acc = float(acc)/float(len(data))
train_acc = float(total_acc) / \
float((batch_index+len(data)+epoch*len(training_data)))
print(('| Epoch: {0:d}/{1:d} | Iter:{2:d} | Loss: {3:.2f} | ' +
'BatchAcc: {4:.2f} | TrainAcc: {5:.2f} |')
.format(epoch+1, epochs, batch_index+len(data),
loss, batch_acc, train_acc))
print("Saving model to", weights_path)
layers = [layer.parameters() for layer in self.layers]
with open(weights_path, 'wb') as handle:
pickle.dump(layers, handle, protocol=pickle.HIGHEST_PROTOCOL)
def forward(self, x):
for l in range(len(self.layers)):
output = self.layers[l].forward(x)
x = output
return output
def predict(self, x):
output = self.forward(x)
digit = np.argmax(output)
probability = output[0, digit]
return digit, probability
def test(self, data, labels):
print("Testing LeNet...")
total_acc = 0
test_size = len(data)
for i in range(test_size):
x = data[i]
y = labels[i]
if np.argmax(self.forward(x)) == np.argmax(y):
total_acc += 1
print("== Correct: {}/{}. Accuracy: {} =="
.format(total_acc, test_size, total_acc/test_size))
| true | true |
f726aa04174ce7bf2f5c510516bdd17021d883d8 | 6,175 | py | Python | deepecg/training/model/disc/model.py | Seb-Good/deepecg | c99fbe80718ee9969936154ae2c1a04d81c9b246 | [
"MIT"
] | 56 | 2019-02-20T04:47:25.000Z | 2022-03-23T01:12:43.000Z | deepecg/training/model/disc/model.py | vivektalwar13071999/deepecg | c99fbe80718ee9969936154ae2c1a04d81c9b246 | [
"MIT"
] | 7 | 2019-12-16T20:59:36.000Z | 2022-02-09T23:48:59.000Z | deepecg/training/model/disc/model.py | vivektalwar13071999/deepecg | c99fbe80718ee9969936154ae2c1a04d81c9b246 | [
"MIT"
] | 22 | 2019-02-24T02:57:20.000Z | 2022-03-23T01:12:49.000Z | """
model.py
--------
This module provides a class and methods for building and managing a model with tensorflow.
By: Sebastian D. Goodfellow, Ph.D., 2018
"""
# Compatibility imports
from __future__ import absolute_import, division, print_function
# 3rd party imports
import os
import sys
import json
import pickle
import tensorflow as tf
# Local imports
from deepecg.training.model.disc.graph import Graph
from deepecg.training.networks.deep_ecg_v1 import DeepECGV1
from deepecg.training.networks.deep_ecg_v2 import DeepECGV2
from deepecg.training.networks.deep_ecg_v3 import DeepECGV3
from deepecg.training.networks.deep_ecg_v4 import DeepECGV4
from deepecg.training.networks.deep_ecg_v5 import DeepECGV5
from deepecg.training.networks.deep_ecg_v6 import DeepECGV6
from deepecg.training.networks.deep_ecg_v7 import DeepECGV7
class Model(object):
    """A class for managing a model through training.

    Wires a named network (resolved from the imported DeepECG* classes)
    into a TensorFlow Graph, owns the tf.Session, and persists everything
    needed to rebuild the model: JSON parameter files, a pickled network
    object, and TF checkpoints, all under save_path/model_name/.
    """
    def __init__(self, model_name, network_name, network_parameters, save_path, data_path, max_to_keep):
        # Set input parameters
        self.model_name = model_name
        self.network_name = network_name
        self.network_parameters = network_parameters
        # All artifacts live under <save_path>/<model_name>/.
        self.save_path = os.path.join(save_path, self.model_name)
        self.data_path = data_path
        self.max_to_keep = max_to_keep
        # Set attributes
        self.sess = None
        self.graph = None
        self.network = None
        # Create project file structure (must exist before anything is saved)
        self._create_folder_structure()
        # Save parameters
        self._save_parameters()
        # Initialize graph
        self.initialize_graph()
    def initialize_graph(self):
        """Build the network, pickle it, construct the TF graph and start a session."""
        # Get neural network
        self.network = self._get_neural_network()
        # Save network object
        self._pickle_network()
        # Build computational graph
        self.graph = Graph(network=self.network, save_path=self.save_path, data_path=self.data_path,
                           max_to_keep=self.max_to_keep)
        # Start session (soft placement lets ops fall back to CPU)
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # Initialize global variables
        self.sess.run(self.graph.init_global)
    @classmethod
    def build_training_graph(cls, save_path):
        """Build training graph.

        Reconstructs a Model from the JSON parameter files previously
        written under save_path (a model directory); the constructor
        re-joins the model name, hence the dirname() below.
        """
        # Import model parameters
        model_parameters = cls._import_model_parameters(save_path=save_path)
        # Import network parameters
        network_parameters = cls._import_network_parameters(save_path=save_path)
        # Initialize Model
        return cls(model_name=model_parameters['model_name'], network_name=model_parameters['network_name'],
                   network_parameters=network_parameters, save_path=os.path.dirname(save_path),
                   data_path=model_parameters['data_path'], max_to_keep=model_parameters['max_to_keep'])
    def restore(self, global_step):
        """Restore model from checkpoint.

        global_step is the checkpoint file name under <save_path>/checkpoints/.
        """
        # Initialize graph if the session was closed
        # NOTE(review): _closed is a private tf.Session attribute -- confirm
        # it is stable across the TF versions in use.
        if self.sess._closed:
            self.initialize_graph()
        # Restore checkpoint
        self.graph.saver.restore(sess=self.sess, save_path=os.path.join(self.save_path, 'checkpoints', global_step))
    def close_session(self):
        """Close any active sessions."""
        try:
            self.sess.close()
        except AttributeError:
            # self.sess is still None: nothing was ever started.
            print('No active Tensorflow session.')
    def _save_parameters(self):
        """Save model and network parameters to JSON."""
        # Save model parameters
        self._save_model_parameters()
        # Save network parameters
        self._save_network_parameters()
    def _save_model_parameters(self):
        """Save model parameters to JSON (write-once: existing files are kept)."""
        # Get model parameters
        model_parameters = dict(model_name=self.model_name, network_name=self.network_name, save_path=self.save_path,
                                data_path=self.data_path, max_to_keep=self.max_to_keep)
        # Save model parameters to JSON
        if not os.path.exists(os.path.join(self.save_path, 'parameters', 'model_parameters.json')):
            with open(os.path.join(self.save_path, 'parameters', 'model_parameters.json'), 'w') as file:
                json.dump(model_parameters, file)
    def _save_network_parameters(self):
        """Save network parameters to JSON (write-once: existing files are kept)."""
        if not os.path.exists(os.path.join(self.save_path, 'parameters', 'network_parameters.json')):
            with open(os.path.join(self.save_path, 'parameters', 'network_parameters.json'), 'w') as file:
                json.dump(self.network_parameters, file)
    def _get_neural_network(self):
        """Instantiate the network class named by self.network_name."""
        # Convert string to class (looked up among this module's imports)
        network = getattr(sys.modules[__name__], self.network_name)
        # Instantiate network class with network parameters
        network = network(**self.network_parameters)
        return network
    def _create_folder_structure(self):
        """Create the per-model directory tree under self.save_path."""
        # Set list of folders
        folders = ['train', 'val', 'checkpoints', 'network', 'graph', 'logs', 'parameters']
        # Main project directory
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)
        # Loop through and create project folders
        for folder in folders:
            self._create_folder(folder=folder)
    def _create_folder(self, folder):
        """Create one sub-folder under self.save_path if it does not exist."""
        if not os.path.exists(os.path.join(self.save_path, folder)):
            os.makedirs(os.path.join(self.save_path, folder))
    def _pickle_network(self):
        """Pickle the network object so the model can be rebuilt later."""
        with open(os.path.join(self.save_path, 'network', 'network.obj'), 'wb') as file:
            pickle.dump(obj=self.network, file=file)
    @staticmethod
    def _import_model_parameters(save_path):
        """Import model parameters from a model directory's JSON file."""
        with open(os.path.join(save_path, 'parameters', 'model_parameters.json')) as file:
            return json.load(file)
    @staticmethod
    def _import_network_parameters(save_path):
        """Import network parameters from a model directory's JSON file."""
        with open(os.path.join(save_path, 'parameters', 'network_parameters.json')) as file:
            return json.load(file)
| 35.488506 | 117 | 0.674494 |
from __future__ import absolute_import, division, print_function
import os
import sys
import json
import pickle
import tensorflow as tf
from deepecg.training.model.disc.graph import Graph
from deepecg.training.networks.deep_ecg_v1 import DeepECGV1
from deepecg.training.networks.deep_ecg_v2 import DeepECGV2
from deepecg.training.networks.deep_ecg_v3 import DeepECGV3
from deepecg.training.networks.deep_ecg_v4 import DeepECGV4
from deepecg.training.networks.deep_ecg_v5 import DeepECGV5
from deepecg.training.networks.deep_ecg_v6 import DeepECGV6
from deepecg.training.networks.deep_ecg_v7 import DeepECGV7
class Model(object):
def __init__(self, model_name, network_name, network_parameters, save_path, data_path, max_to_keep):
self.model_name = model_name
self.network_name = network_name
self.network_parameters = network_parameters
self.save_path = os.path.join(save_path, self.model_name)
self.data_path = data_path
self.max_to_keep = max_to_keep
self.sess = None
self.graph = None
self.network = None
self._create_folder_structure()
self._save_parameters()
self.initialize_graph()
def initialize_graph(self):
self.network = self._get_neural_network()
self._pickle_network()
self.graph = Graph(network=self.network, save_path=self.save_path, data_path=self.data_path,
max_to_keep=self.max_to_keep)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
self.sess.run(self.graph.init_global)
@classmethod
def build_training_graph(cls, save_path):
model_parameters = cls._import_model_parameters(save_path=save_path)
network_parameters = cls._import_network_parameters(save_path=save_path)
return cls(model_name=model_parameters['model_name'], network_name=model_parameters['network_name'],
network_parameters=network_parameters, save_path=os.path.dirname(save_path),
data_path=model_parameters['data_path'], max_to_keep=model_parameters['max_to_keep'])
def restore(self, global_step):
if self.sess._closed:
self.initialize_graph()
self.graph.saver.restore(sess=self.sess, save_path=os.path.join(self.save_path, 'checkpoints', global_step))
def close_session(self):
try:
self.sess.close()
except AttributeError:
print('No active Tensorflow session.')
def _save_parameters(self):
self._save_model_parameters()
self._save_network_parameters()
def _save_model_parameters(self):
model_parameters = dict(model_name=self.model_name, network_name=self.network_name, save_path=self.save_path,
data_path=self.data_path, max_to_keep=self.max_to_keep)
if not os.path.exists(os.path.join(self.save_path, 'parameters', 'model_parameters.json')):
with open(os.path.join(self.save_path, 'parameters', 'model_parameters.json'), 'w') as file:
json.dump(model_parameters, file)
def _save_network_parameters(self):
if not os.path.exists(os.path.join(self.save_path, 'parameters', 'network_parameters.json')):
with open(os.path.join(self.save_path, 'parameters', 'network_parameters.json'), 'w') as file:
json.dump(self.network_parameters, file)
def _get_neural_network(self):
network = getattr(sys.modules[__name__], self.network_name)
network = network(**self.network_parameters)
return network
def _create_folder_structure(self):
folders = ['train', 'val', 'checkpoints', 'network', 'graph', 'logs', 'parameters']
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
for folder in folders:
self._create_folder(folder=folder)
def _create_folder(self, folder):
if not os.path.exists(os.path.join(self.save_path, folder)):
os.makedirs(os.path.join(self.save_path, folder))
def _pickle_network(self):
with open(os.path.join(self.save_path, 'network', 'network.obj'), 'wb') as file:
pickle.dump(obj=self.network, file=file)
@staticmethod
def _import_model_parameters(save_path):
with open(os.path.join(save_path, 'parameters', 'model_parameters.json')) as file:
return json.load(file)
@staticmethod
def _import_network_parameters(save_path):
with open(os.path.join(save_path, 'parameters', 'network_parameters.json')) as file:
return json.load(file)
| true | true |
f726aa89dee842342ea1bd383144960b734ac342 | 607 | py | Python | setup.py | Yoshiki443/weather_parameters | ae2c9ed02f68968cb6ea0610d556f3c68bbc923e | [
"MIT"
] | 17 | 2020-04-26T20:25:56.000Z | 2022-03-10T09:41:54.000Z | setup.py | Yoshiki443/weather_parameters | ae2c9ed02f68968cb6ea0610d556f3c68bbc923e | [
"MIT"
] | null | null | null | setup.py | Yoshiki443/weather_parameters | ae2c9ed02f68968cb6ea0610d556f3c68bbc923e | [
"MIT"
] | 1 | 2020-06-08T04:54:30.000Z | 2020-06-08T04:54:30.000Z | import setuptools
# Distribution metadata for the wxparams package.
_classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]

setuptools.setup(
    name="wxparams",
    version="1.5",
    author="Yoshiki Kato",
    description="Weather Parameters Calculator",
    long_description="This is a python module for calculating meteorological parameters.",
    long_description_content_type="text/markdown",
    url="https://github.com/Yoshiki443/weather_parameters",
    packages=setuptools.find_packages(),
    classifiers=_classifiers,
    license='MIT',
)
| 30.35 | 90 | 0.678748 | import setuptools
setuptools.setup(
name="wxparams",
version="1.5",
author="Yoshiki Kato",
description="Weather Parameters Calculator",
long_description="This is a python module for calculating meteorological parameters.",
long_description_content_type="text/markdown",
url="https://github.com/Yoshiki443/weather_parameters",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
license='MIT'
)
| true | true |
f726aacd3954efc8b82de1b378ebd375941886de | 6,348 | py | Python | Testing/ND-Testing.py | garibaldu/boundary-seekers | 441fea01e93de882bf22e0deb411f0b10602fa37 | [
"MIT"
] | null | null | null | Testing/ND-Testing.py | garibaldu/boundary-seekers | 441fea01e93de882bf22e0deb411f0b10602fa37 | [
"MIT"
] | null | null | null | Testing/ND-Testing.py | garibaldu/boundary-seekers | 441fea01e93de882bf22e0deb411f0b10602fa37 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
def __perms(n):
    """Return all 2**n binary vectors of length n as numpy integer arrays.

    Returns None for a falsy n, matching the original behaviour.
    """
    if not n:
        return
    patterns = []
    for value in range(2 ** n):
        # Zero-padded binary rendering of `value`, one bit per vector entry.
        bits = format(value, "0{}b".format(n))
        patterns.append(np.array([int(bit) for bit in bits]))
    return patterns
def care(normal, bias, example):
    """Gate probability: logistic sigmoid of dot(normal, example) + bias."""
    score = np.dot(normal, example) + bias
    return 1.0 / (1.0 + np.exp(-score))
def deci(normal, bias, example):
    """Decision probability: logistic sigmoid of the hyperplane's affine score."""
    logit = bias + np.dot(normal, example)
    return 1.0 / (np.exp(-logit) + 1.0)
def sigmoid(phi):
    # Elementwise logistic sigmoid built from TensorFlow ops (works on tensors).
    return 1.0/(1.0 + tf.exp(-phi))
def compute_penalty(weights, size):
    # L1 norm of the non-bias entries of a (1, size+1) weight row:
    # the leading 0 in the mask zeroes out the bias column (index 0).
    mask = np.concatenate((np.array([0], dtype=np.float32), np.ones(size, dtype=np.float32)))
    return tf.reduce_sum(tf.abs(tf.multiply(mask, weights)))
def train_boundary_hunter(points, out, iterations):
    """Train one gated linear unit (a "boundary hunter") with TF1 graph mode.

    points: list of equal-length input vectors; out: parallel list of 0/1
    targets; iterations: full passes over the data (one SGD step per example).
    Returns (boundary_weights, gate_weights, bias) as numpy values.
    """
    in_size = len(points[0])
    out_size = 1
    inputs = tf.placeholder('float32', [in_size])
    targets = tf.placeholder('float32', [out_size])
    # (1, in_size+1) rows: the extra leading weight pairs with the constant-1 input.
    hidden_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(1, in_size+1)), dtype='float32')
    gate_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(1, in_size+1)), dtype='float32')
    byas = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(1)), dtype='float32')
    #output_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(out_size, num_centroids + 1)), dtype='float32')
    # Prepend the constant 1 so the first weight entry acts as a bias term.
    inputs_prime = tf.concat([[1.0], inputs], axis=0)
    # Perform computation: the gate g blends the boundary response with `byas`.
    prob = tf.reduce_sum(tf.multiply(inputs_prime, hidden_weights), 1)
    g = sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, gate_weights), 1))
    #hidden_out = tf.add(byas, tf.multiply(g, tf.subtract(prob, byas)))
    hidden_out = sigmoid(tf.add(g * prob, (1-g) * byas))
    # Reward = log of the L1 mass of the non-bias weights (see compute_penalty);
    # subtracted from the loss below, so it discourages all-zero weights.
    reward = tf.log(compute_penalty(hidden_weights, in_size) + compute_penalty(gate_weights, in_size))
    targets_prime = tf.expand_dims(targets, 1)
    output = hidden_out
    # Binary cross-entropy per example.
    errors = -(targets_prime * tf.log(output) + (1 -targets_prime) * tf.log(1 - output))#tf.pow(tf.subtract(tf.expand_dims(targets, 1), output), 2.0)
    error = tf.reduce_sum(errors)
    minimize = error - 0.02 * reward
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(minimize)
    #clip_byas = tf.assign(byas, tf.clip_by_value(byas, 0, 1))
    model = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(model)
        for e in range(iterations):
            for d in range(len(points)):
                session.run(train_op, feed_dict={inputs: points[d], targets: [out[d]]})
            #session.run(clip_byas)
            if e % 10 == 0:
                # Periodic progress report: current bias, total loss, reward.
                print(session.run(byas))
                err = 0
                for d in range(len(points)):
                    err += session.run(error, feed_dict={inputs: points[d], targets: [out[d]]})
                print(err)
                print(session.run(reward))
                print()
        gates = session.run(gate_weights)[0]
        byas = session.run(byas)[0]  # rebinds `byas` to its learned numpy value
        boundarys = session.run(hidden_weights)[0]
    return (boundarys, gates, byas)
def get_final_class(predictions):
    """Majority-vote a list of probabilities, where None entries abstain.

    Each non-None prediction >= 0.5 votes for class 1, < 0.5 for class 0.
    Returns 0 or 1 (ties go to class 1, preserving the original ordering),
    or None when there are no votes at all.
    """
    tally_0 = 0
    tally_1 = 0
    for p in predictions:
        # Fix: use `is None` identity test (the original used `not p == None`
        # and evaluated it twice per element).
        if p is None:
            continue
        if p >= 0.5:
            tally_1 += 1
        else:
            tally_0 += 1
    if tally_0 == 0 and tally_1 == 0:
        return None
    # Class 0 needs a strict majority; ties favour class 1.
    return 0 if tally_0 > tally_1 else 1
def run_boundary_hunters(boundarys, gates, points, out):
    """Evaluate an ensemble of trained boundary hunters on a labelled data set.

    For each point, every hunter whose gate fires (> 0.5) casts a probability
    vote; votes are combined by get_final_class.  Returns
    (accuracy over the points that received a guess, fraction left unsure).
    """
    in_size = len(points[0])
    out_size = 1
    inputs = tf.placeholder('float32', [in_size])
    targets = tf.placeholder('float32', [out_size])
    hidden_weights = tf.placeholder('float32', [None])
    gate_weights = tf.placeholder('float32', [None])
    # Constant 1 prepended so the first weight entry is the bias.
    inputs_prime = tf.concat([[1.0], inputs], axis=0)
    g = sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, gate_weights)))
    # NOTE(review): evaluation applies sigmoid to the raw boundary response,
    # not the gated blend used during training — confirm this is intended.
    prob = sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, hidden_weights)))
    model = tf.global_variables_initializer()
    unsure = 0
    guessed = 0
    correct = 0
    with tf.Session() as session:
        session.run(model)
        for d in range(len(points)):
            predictions = []
            for b in range(len(boundarys)):
                prediction = None
                # The gate output decides whether this hunter votes on the point.
                care = session.run(g, feed_dict={inputs: points[d], hidden_weights: boundarys[b], gate_weights: gates[b]})
                if care > 0.5:
                    prediction = session.run(prob, feed_dict={inputs: points[d], hidden_weights: boundarys[b], gate_weights: gates[b]})
                predictions.append(prediction)
            p = get_final_class(predictions)
            #print(predictions, ": ", p)
            if not p == None:
                guessed += 1
                if p == out[d]:
                    correct += 1
            elif p == None:
                unsure += 1
    return float(correct)/float(guessed), float(unsure)/float(len(points))
# ---- Experiment driver ----
N = 7

# Generate all points on the N-dimensional hypercube.
examples = __perms(N)
targets = []

# Generate a random target "boundary hunter": a care gate plus a decision
# hyperplane, and a coin bias used outside the gate's region.
bias = np.random.uniform(0, 1, 1)
decision = np.random.uniform(-1, 1, N)
decision_b = np.random.uniform(-1, 1, 1)
caring = np.random.uniform(-1, 1, N)
caring_b = np.random.uniform(-1, 1, 1)

uncertian = 0
class1 = 0
class0 = 0
for example in examples:
    clas = None
    c = care(caring, caring_b, example)
    if c < 0.5:
        # Outside the gate's region of competence: label with a biased coin.
        uncertian += 1
        r = np.random.rand(1)
        if r > bias:
            clas = 1
        else:
            clas = 0
    else:
        # Inside the region: label with the decision hyperplane.
        d = deci(decision, decision_b, example)
        if d >= 0.5:
            clas = 1
            class1 += 1
        else:
            clas = 0
            class0 += 1
    targets.append(clas)

# A single-class labelling cannot be learned; abort the run.
if class0 == 0 or class1 == 0:
    print("Class 0: ", class0)
    print("Class 1: ", class1)
    print("Err")
    # Bug fix: `raise "GSFE"` raises a plain string, which is a TypeError in
    # Python 3 ("exceptions must derive from BaseException").
    raise RuntimeError("GSFE")
bh = train_boundary_hunter(examples, targets, 20000)

print("Uncertian: ", uncertian)
print("Class 0: ", class0)
print("Class 1: ", class1)
print("Bias: ", bias)
print("{}, {}".format(decision_b, decision))
print("{}, {}".format(caring_b, caring))
# Score the ground-truth hunter, then the trained one.
print(run_boundary_hunters([np.concatenate((decision_b, decision))], [np.concatenate((caring_b, caring))], examples, targets))
print()
print(bh)
print(run_boundary_hunters([bh[0]], [bh[1]], examples, targets))
| 29.943396 | 150 | 0.594045 | import numpy as np
import tensorflow as tf
def __perms(n):
if not n:
return
p = []
for i in range(0, 2**n):
s = bin(i)[2:]
s = "0" * (n-len(s)) + s
s_prime = np.array(list(map(lambda x: int(x), list(s))))
p.append(s_prime)
return p
def care(normal, bias, example):
z = np.dot(normal, example) + bias
return 1.0/(1.0 + np.exp(-z))
def deci(normal, bias, example):
z = np.dot(normal, example) + bias
return 1.0/(1.0 + np.exp(-z))
def sigmoid(phi):
return 1.0/(1.0 + tf.exp(-phi))
def compute_penalty(weights, size):
mask = np.concatenate((np.array([0], dtype=np.float32), np.ones(size, dtype=np.float32)))
return tf.reduce_sum(tf.abs(tf.multiply(mask, weights)))
def train_boundary_hunter(points, out, iterations):
in_size = len(points[0])
out_size = 1
inputs = tf.placeholder('float32', [in_size])
targets = tf.placeholder('float32', [out_size])
hidden_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(1, in_size+1)), dtype='float32')
gate_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(1, in_size+1)), dtype='float32')
byas = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(1)), dtype='float32')
inputs_prime = tf.concat([[1.0], inputs], axis=0)
prob = tf.reduce_sum(tf.multiply(inputs_prime, hidden_weights), 1)
g = sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, gate_weights), 1))
hidden_out = sigmoid(tf.add(g * prob, (1-g) * byas))
reward = tf.log(compute_penalty(hidden_weights, in_size) + compute_penalty(gate_weights, in_size))
targets_prime = tf.expand_dims(targets, 1)
output = hidden_out
errors = -(targets_prime * tf.log(output) + (1 -targets_prime) * tf.log(1 - output))
error = tf.reduce_sum(errors)
minimize = error - 0.02 * reward
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(minimize)
model = tf.global_variables_initializer()
with tf.Session() as session:
session.run(model)
for e in range(iterations):
for d in range(len(points)):
session.run(train_op, feed_dict={inputs: points[d], targets: [out[d]]})
if e % 10 == 0:
print(session.run(byas))
err = 0
for d in range(len(points)):
err += session.run(error, feed_dict={inputs: points[d], targets: [out[d]]})
print(err)
print(session.run(reward))
print()
gates = session.run(gate_weights)[0]
byas = session.run(byas)[0]
boundarys = session.run(hidden_weights)[0]
return (boundarys, gates, byas)
def get_final_class(predictions):
tally_0 = 0
tally_1 = 0
for p in predictions:
if (not p == None) and p >= 0.5:
tally_1 += 1
elif (not p == None) and p < 0.5:
tally_0 += 1
if tally_0 == 0 and tally_1 == 0:
return None
return 0 if tally_0 > tally_1 else 1
def run_boundary_hunters(boundarys, gates, points, out):
in_size = len(points[0])
out_size = 1
inputs = tf.placeholder('float32', [in_size])
targets = tf.placeholder('float32', [out_size])
hidden_weights = tf.placeholder('float32', [None])
gate_weights = tf.placeholder('float32', [None])
inputs_prime = tf.concat([[1.0], inputs], axis=0)
g = sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, gate_weights)))
prob = sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, hidden_weights)))
model = tf.global_variables_initializer()
unsure = 0
guessed = 0
correct = 0
with tf.Session() as session:
session.run(model)
for d in range(len(points)):
predictions = []
for b in range(len(boundarys)):
prediction = None
care = session.run(g, feed_dict={inputs: points[d], hidden_weights: boundarys[b], gate_weights: gates[b]})
if care > 0.5:
prediction = session.run(prob, feed_dict={inputs: points[d], hidden_weights: boundarys[b], gate_weights: gates[b]})
predictions.append(prediction)
p = get_final_class(predictions)
if not p == None:
guessed += 1
if p == out[d]:
correct += 1
elif p == None:
unsure += 1
return float(correct)/float(guessed), float(unsure)/float(len(points))
N = 7
examples = __perms(N)
targets = []
bias = np.random.uniform(0, 1, 1)
decision = np.random.uniform(-1, 1, N)
decision_b = np.random.uniform(-1, 1, 1)
caring = np.random.uniform(-1, 1, N)
caring_b = np.random.uniform(-1, 1, 1)
uncertian = 0
class1 = 0
class0 = 0
for example in examples:
clas = None
c = care(caring, caring_b, example)
if c < 0.5:
uncertian += 1
r = np.random.rand(1)
if r > bias:
clas = 1
else:
clas = 0
else:
d = deci(decision, decision_b, example)
if d >= 0.5:
clas = 1
class1 += 1
else:
clas=0
class0 += 1
targets.append(clas)
if class0 == 0 or class1 == 0:
print("Class 0: ", class0)
print("Class 1: ", class1)
print("Err")
raise "GSFE"
bh = train_boundary_hunter(examples, targets, 20000)
print("Uncertian: ", uncertian)
print("Class 0: ", class0)
print("Class 1: ", class1)
print("Bias: ", bias)
print("{}, {}".format(decision_b, decision))
print("{}, {}".format(caring_b, caring))
print(run_boundary_hunters([np.concatenate((decision_b, decision))], [np.concatenate((caring_b, caring))], examples, targets))
print()
print(bh)
print(run_boundary_hunters([bh[0]], [bh[1]], examples, targets))
| true | true |
f726aafdc70d344f7835f59ea676ff8263ce502c | 6,600 | py | Python | Lib/site-packages/wx-2.8-msw-unicode/wx/tools/XRCed/plugins/xh_gizmos.py | ekkipermana/robotframework-test | 243ca26f69962f8cf20cd7d054e0ff3e709bc7f4 | [
"bzip2-1.0.6"
] | 27 | 2020-11-12T19:24:54.000Z | 2022-03-27T23:10:45.000Z | Lib/site-packages/wx-2.8-msw-unicode/wx/tools/XRCed/plugins/xh_gizmos.py | ekkipermana/robotframework-test | 243ca26f69962f8cf20cd7d054e0ff3e709bc7f4 | [
"bzip2-1.0.6"
] | 2 | 2020-11-02T06:30:39.000Z | 2022-02-23T18:39:55.000Z | Lib/site-packages/wx-2.8-msw-unicode/wx/tools/XRCed/plugins/xh_gizmos.py | ekkipermana/robotframework-test | 243ca26f69962f8cf20cd7d054e0ff3e709bc7f4 | [
"bzip2-1.0.6"
] | 7 | 2018-02-13T10:22:39.000Z | 2019-07-04T07:39:28.000Z | # Name: gizmos.py
# Purpose: XML handlers for wx.gismos classes
# Author: Roman Rolinsky <rolinsky@femagsoft.com>
# Created: 09.07.2007
# RCS-ID: $Id$
import wx
import wx.xrc as xrc
import wx.gizmos as gizmos
class LEDNumberCtrlXmlHandler(xrc.XmlResourceHandler):
    """XRC resource handler that creates wx.gizmos.LEDNumberCtrl widgets."""
    def __init__(self):
        xrc.XmlResourceHandler.__init__(self)
        # Standard styles
        self.AddWindowStyles()
        # Custom styles
        self.AddStyle('wxLED_ALIGN_LEFT', gizmos.LED_ALIGN_LEFT)
        self.AddStyle('wxLED_ALIGN_RIGHT', gizmos.LED_ALIGN_RIGHT)
        self.AddStyle('wxLED_ALIGN_CENTER', gizmos.LED_ALIGN_CENTER)
        self.AddStyle('wxLED_DRAW_FADED', gizmos.LED_DRAW_FADED)

    def CanHandle(self,node):
        # Only handle <object class="LEDNumberCtrl"> nodes.
        return self.IsOfClass(node, 'LEDNumberCtrl')

    # Process XML parameters and create the object
    def DoCreateResource(self):
        assert self.GetInstance() is None
        w = gizmos.LEDNumberCtrl(self.GetParentAsWindow(),
                                 self.GetID(),
                                 self.GetPosition(),
                                 self.GetSize(),
                                 self.GetStyle())
        # wxLED_ALIGN_MASK was incorrect -- the low three style bits carry the
        # alignment, so extract them with an explicit & 7.
        align = self.GetStyle() & 7
        if align: w.SetAlignment(self.GetStyle() & 7)
        w.SetValue(self.GetText('value'))
        self.SetupWindow(w)
        return w
class EditableListBoxXmlHandler(xrc.XmlResourceHandler):
    """XRC handler for wx.gizmos.EditableListBox, including its <content> items."""
    def __init__(self):
        xrc.XmlResourceHandler.__init__(self)
        # Standard styles
        self.AddWindowStyles()
        # Custom styles
        self.AddStyle('wxEL_ALLOW_NEW', gizmos.EL_ALLOW_NEW)
        self.AddStyle('wxEL_ALLOW_EDIT', gizmos.EL_ALLOW_EDIT)
        self.AddStyle('wxEL_ALLOW_DELETE', gizmos.EL_ALLOW_DELETE)

    def CanHandle(self, node):
        return self.IsOfClass(node, 'EditableListBox')
#        return self.IsOfClass(node, 'EditableListBox') or \
#               self.insideBox and node.GetName() == 'item'

    # Process XML parameters and create the object
    def DoCreateResource(self):
        assert self.GetInstance() is None
        w = gizmos.EditableListBox(self.GetParentAsWindow(),
                                   self.GetID(),
                                   self.GetText("label"),
                                   self.GetPosition(),
                                   self.GetSize(),
                                   self.GetStyle(),
                                   self.GetName())
        # Doesn't work (private child creation never dispatched to 'item' nodes):
        #self.insideBox = True
        #self.CreateChildrenPrivately(None, self.GetParamNode('content'))
        #self.insideBox = False
        # Long way: walk the <content> children manually and collect every
        # <item> element's text.
        strings = []
        n = self.GetParamNode('content')
        if n: n = n.GetChildren()
        while n:
            if n.GetType() != xrc.XML_ELEMENT_NODE or n.GetName() != "item":
                n = n.GetNext()
                continue
            strings.append(n.GetNodeContent())
            n = n.GetNext()
        w.SetStrings(strings)
        self.SetupWindow(w)
        return w
class TreeListCtrlXmlHandler(xrc.XmlResourceHandler):
    """XRC resource handler that creates wx.gizmos.TreeListCtrl widgets."""
    def __init__(self):
        xrc.XmlResourceHandler.__init__(self)
        # Standard styles
        self.AddWindowStyles()
        # Custom styles
        # NOTE(review): several style names below carry a trailing space inside
        # the string literal (e.g. 'wxTL_ALIGN_LEFT '); XRC matches style tokens
        # textually, so those entries may never match — confirm intended.
        self.AddStyle('wxDEFAULT_COL_WIDTH', gizmos.DEFAULT_COL_WIDTH)
        self.AddStyle('wxTL_MODE_NAV_FULLTREE', gizmos.TL_MODE_NAV_FULLTREE)
        self.AddStyle('wxTL_MODE_NAV_EXPANDED', gizmos.TL_MODE_NAV_EXPANDED)
        self.AddStyle('wxTL_MODE_NAV_VISIBLE', gizmos.TL_MODE_NAV_VISIBLE)
        self.AddStyle('wxTL_MODE_NAV_LEVEL', gizmos.TL_MODE_NAV_LEVEL)
        self.AddStyle('wxTL_MODE_FIND_EXACT', gizmos.TL_MODE_FIND_EXACT)
        self.AddStyle('wxTL_MODE_FIND_PARTIAL', gizmos.TL_MODE_FIND_PARTIAL)
        self.AddStyle('wxTL_MODE_FIND_NOCASE', gizmos.TL_MODE_FIND_NOCASE)
        self.AddStyle('wxTREE_HITTEST_ONITEMCOLUMN', gizmos.TREE_HITTEST_ONITEMCOLUMN)
        self.AddStyle('wxTR_COLUMN_LINES', gizmos.TR_COLUMN_LINES)
        self.AddStyle('wxTR_VIRTUAL', gizmos.TR_VIRTUAL)
        self.AddStyle('wxTL_ALIGN_LEFT ', wx.ALIGN_LEFT)
        self.AddStyle('wxTL_ALIGN_RIGHT ', wx.ALIGN_RIGHT)
        self.AddStyle('wxTL_ALIGN_CENTER', wx.ALIGN_CENTER)
        self.AddStyle('wxTL_SEARCH_VISIBLE', gizmos.TL_MODE_NAV_VISIBLE)
        self.AddStyle('wxTL_SEARCH_LEVEL ', gizmos.TL_MODE_NAV_LEVEL)
        self.AddStyle('wxTL_SEARCH_FULL ', gizmos.TL_MODE_FIND_EXACT)
        self.AddStyle('wxTL_SEARCH_PARTIAL', gizmos.TL_MODE_FIND_PARTIAL)
        self.AddStyle('wxTL_SEARCH_NOCASE ', gizmos.TL_MODE_FIND_NOCASE)
        self.AddStyle('wxTR_DONT_ADJUST_MAC', gizmos.TR_DONT_ADJUST_MAC)
        self.AddStyle('wxTR_DEFAULT_STYLE', wx.TR_DEFAULT_STYLE)

    def CanHandle(self, node):
        return self.IsOfClass(node, 'TreeListCtrl')

    # Process XML parameters and create the object
    def DoCreateResource(self):
        assert self.GetInstance() is None
        w = gizmos.TreeListCtrl(self.GetParentAsWindow(),
                                self.GetID(),
                                style=self.GetStyle(),
                                name=self.GetName())
        # NOTE(review): the columns and items below are placeholder/demo content
        # baked into every control this handler creates — confirm intended.
        w.AddColumn("Main column")
        w.AddColumn('Column 1')
        w.SetMainColumn(0)
        w.SetColumnWidth(0, 50)
        w.SetColumnWidth(1, 50)
        root = w.AddRoot('Root')
        w.SetItemText(root, "col 1", 1)
        item1 = w.AppendItem(root, 'item 1')
        w.SetItemText(item1, "col 1", 1)
        w.Expand(root)
        return w
class DynamicSashWindowXmlHandler(xrc.XmlResourceHandler):
    """XRC resource handler that creates wx.gizmos.DynamicSashWindow widgets."""
    def __init__(self):
        xrc.XmlResourceHandler.__init__(self)
        # Standard styles
        self.AddWindowStyles()
        # Custom styles
        self.AddStyle('wxDS_MANAGE_SCROLLBARS', gizmos.DS_MANAGE_SCROLLBARS)
        self.AddStyle('wxDS_DRAG_CORNER', gizmos.DS_DRAG_CORNER)

    def CanHandle(self, node):
        # Only handle <object class="DynamicSashWindow"> nodes.
        return self.IsOfClass(node, 'DynamicSashWindow')

    # Process XML parameters and create the object
    def DoCreateResource(self):
        assert self.GetInstance() is None
        w = gizmos.DynamicSashWindow(self.GetParentAsWindow(),
                                     self.GetID(),
                                     self.GetPosition(),
                                     self.GetSize(),
                                     self.GetStyle(),
                                     self.GetName())
        self.SetupWindow(w)
        return w
| 39.285714 | 86 | 0.601667 |
import wx
import wx.xrc as xrc
import wx.gizmos as gizmos
class LEDNumberCtrlXmlHandler(xrc.XmlResourceHandler):
def __init__(self):
xrc.XmlResourceHandler.__init__(self)
self.AddWindowStyles()
self.AddStyle('wxLED_ALIGN_LEFT', gizmos.LED_ALIGN_LEFT)
self.AddStyle('wxLED_ALIGN_RIGHT', gizmos.LED_ALIGN_RIGHT)
self.AddStyle('wxLED_ALIGN_CENTER', gizmos.LED_ALIGN_CENTER)
self.AddStyle('wxLED_DRAW_FADED', gizmos.LED_DRAW_FADED)
def CanHandle(self,node):
return self.IsOfClass(node, 'LEDNumberCtrl')
def DoCreateResource(self):
assert self.GetInstance() is None
w = gizmos.LEDNumberCtrl(self.GetParentAsWindow(),
self.GetID(),
self.GetPosition(),
self.GetSize(),
self.GetStyle())
align = self.GetStyle() & 7
if align: w.SetAlignment(self.GetStyle() & 7)
w.SetValue(self.GetText('value'))
self.SetupWindow(w)
return w
class EditableListBoxXmlHandler(xrc.XmlResourceHandler):
def __init__(self):
xrc.XmlResourceHandler.__init__(self)
self.AddWindowStyles()
self.AddStyle('wxEL_ALLOW_NEW', gizmos.EL_ALLOW_NEW)
self.AddStyle('wxEL_ALLOW_EDIT', gizmos.EL_ALLOW_EDIT)
self.AddStyle('wxEL_ALLOW_DELETE', gizmos.EL_ALLOW_DELETE)
def CanHandle(self, node):
return self.IsOfClass(node, 'EditableListBox')
def DoCreateResource(self):
assert self.GetInstance() is None
w = gizmos.EditableListBox(self.GetParentAsWindow(),
self.GetID(),
self.GetText("label"),
self.GetPosition(),
self.GetSize(),
self.GetStyle(),
self.GetName())
#self.insideBox = True
#self.CreateChildrenPrivately(None, self.GetParamNode('content'))
#self.insideBox = False
# Long way
strings = []
n = self.GetParamNode('content')
if n: n = n.GetChildren()
while n:
if n.GetType() != xrc.XML_ELEMENT_NODE or n.GetName() != "item":
n = n.GetNext()
continue
strings.append(n.GetNodeContent())
n = n.GetNext()
w.SetStrings(strings)
self.SetupWindow(w)
return w
class TreeListCtrlXmlHandler(xrc.XmlResourceHandler):
def __init__(self):
xrc.XmlResourceHandler.__init__(self)
# Standard styles
self.AddWindowStyles()
# Custom styles
self.AddStyle('wxDEFAULT_COL_WIDTH', gizmos.DEFAULT_COL_WIDTH)
self.AddStyle('wxTL_MODE_NAV_FULLTREE', gizmos.TL_MODE_NAV_FULLTREE)
self.AddStyle('wxTL_MODE_NAV_EXPANDED', gizmos.TL_MODE_NAV_EXPANDED)
self.AddStyle('wxTL_MODE_NAV_VISIBLE', gizmos.TL_MODE_NAV_VISIBLE)
self.AddStyle('wxTL_MODE_NAV_LEVEL', gizmos.TL_MODE_NAV_LEVEL)
self.AddStyle('wxTL_MODE_FIND_EXACT', gizmos.TL_MODE_FIND_EXACT)
self.AddStyle('wxTL_MODE_FIND_PARTIAL', gizmos.TL_MODE_FIND_PARTIAL)
self.AddStyle('wxTL_MODE_FIND_NOCASE', gizmos.TL_MODE_FIND_NOCASE)
self.AddStyle('wxTREE_HITTEST_ONITEMCOLUMN', gizmos.TREE_HITTEST_ONITEMCOLUMN)
self.AddStyle('wxTR_COLUMN_LINES', gizmos.TR_COLUMN_LINES)
self.AddStyle('wxTR_VIRTUAL', gizmos.TR_VIRTUAL)
self.AddStyle('wxTL_ALIGN_LEFT ', wx.ALIGN_LEFT)
self.AddStyle('wxTL_ALIGN_RIGHT ', wx.ALIGN_RIGHT)
self.AddStyle('wxTL_ALIGN_CENTER', wx.ALIGN_CENTER)
self.AddStyle('wxTL_SEARCH_VISIBLE', gizmos.TL_MODE_NAV_VISIBLE)
self.AddStyle('wxTL_SEARCH_LEVEL ', gizmos.TL_MODE_NAV_LEVEL)
self.AddStyle('wxTL_SEARCH_FULL ', gizmos.TL_MODE_FIND_EXACT)
self.AddStyle('wxTL_SEARCH_PARTIAL', gizmos.TL_MODE_FIND_PARTIAL)
self.AddStyle('wxTL_SEARCH_NOCASE ', gizmos.TL_MODE_FIND_NOCASE)
self.AddStyle('wxTR_DONT_ADJUST_MAC', gizmos.TR_DONT_ADJUST_MAC)
self.AddStyle('wxTR_DEFAULT_STYLE', wx.TR_DEFAULT_STYLE)
def CanHandle(self, node):
return self.IsOfClass(node, 'TreeListCtrl')
# Process XML parameters and create the object
def DoCreateResource(self):
assert self.GetInstance() is None
w = gizmos.TreeListCtrl(self.GetParentAsWindow(),
self.GetID(),
style=self.GetStyle(),
name=self.GetName())
w.AddColumn("Main column")
w.AddColumn('Column 1')
w.SetMainColumn(0)
w.SetColumnWidth(0, 50)
w.SetColumnWidth(1, 50)
root = w.AddRoot('Root')
w.SetItemText(root, "col 1", 1)
item1 = w.AppendItem(root, 'item 1')
w.SetItemText(item1, "col 1", 1)
w.Expand(root)
return w
class DynamicSashWindowXmlHandler(xrc.XmlResourceHandler):
def __init__(self):
xrc.XmlResourceHandler.__init__(self)
# Standard styles
self.AddWindowStyles()
# Custom styles
self.AddStyle('wxDS_MANAGE_SCROLLBARS', gizmos.DS_MANAGE_SCROLLBARS)
self.AddStyle('wxDS_DRAG_CORNER', gizmos.DS_DRAG_CORNER)
def CanHandle(self, node):
return self.IsOfClass(node, 'DynamicSashWindow')
# Process XML parameters and create the object
def DoCreateResource(self):
assert self.GetInstance() is None
w = gizmos.DynamicSashWindow(self.GetParentAsWindow(),
self.GetID(),
self.GetPosition(),
self.GetSize(),
self.GetStyle(),
self.GetName())
self.SetupWindow(w)
return w
| true | true |
f726ab4ccfc111179e65303e6251acccc8a648d5 | 4,241 | py | Python | loot_tables.py | Battlecats59/MCBELootRandomizer | de5c49c65fb12c1e3ec391b665bdfd9a5c64c7cc | [
"MIT"
] | null | null | null | loot_tables.py | Battlecats59/MCBELootRandomizer | de5c49c65fb12c1e3ec391b665bdfd9a5c64c7cc | [
"MIT"
] | null | null | null | loot_tables.py | Battlecats59/MCBELootRandomizer | de5c49c65fb12c1e3ec391b665bdfd9a5c64c7cc | [
"MIT"
] | null | null | null | import os
import json
import yaml
from typing import OrderedDict
from yaml.loader import FullLoader
from paths import RANDO_ROOT_PATH
class loot_tables:
    """Loads the loot-table category list, splits it per the randomizer
    options, reads the table JSON files, and zips shuffled contents back
    onto file/display names for patching and the spoiler log."""

    def get_loot_tables(self, options):
        """Partition entity and chest loot tables into randomized and
        unrandomized groups for the game version and toggles in *options*.

        Returns (randomized_mobs, unrandomized_mobs,
                 randomized_chests, unrandomized_chests).
        """
        with (RANDO_ROOT_PATH / 'loot_table_categories.yaml').open('r') as loot_tables:
            self.loot_table_list = yaml.load(loot_tables, Loader=FullLoader)
        self.randomized_mob_loot_table_list = []
        self.unrandomized_mob_loot_table_list = []
        self.randomized_chest_loot_table_list = []
        self.unrandomized_chest_loot_table_list = []
        for mob_lt in self.loot_table_list['entities']:
            # Skip tables that do not exist in the targeted game version.
            if options['version'] not in [str(ver) for ver in mob_lt['versions']]:
                continue
            # Direct truth test instead of the original `== True` comparison.
            if options['randomized_' + mob_lt['type']]:
                self.randomized_mob_loot_table_list.append(mob_lt)
            else:
                self.unrandomized_mob_loot_table_list.append(mob_lt)
        for chest_lt in self.loot_table_list['chests']:
            if options['version'] not in [str(ver) for ver in chest_lt['versions']]:
                continue
            if options['randomized_' + chest_lt['type'] + '_chests']:
                self.randomized_chest_loot_table_list.append(chest_lt)
            else:
                self.unrandomized_chest_loot_table_list.append(chest_lt)
        # Combined, order-preserving lists reused later by write_loot_tables().
        self.mob_loot_tables_list = self.randomized_mob_loot_table_list + self.unrandomized_mob_loot_table_list
        self.chest_loot_tables_list = self.randomized_chest_loot_table_list + self.unrandomized_chest_loot_table_list
        return self.randomized_mob_loot_table_list, self.unrandomized_mob_loot_table_list, self.randomized_chest_loot_table_list, self.unrandomized_chest_loot_table_list

    def read_loot_tables(self, mob_loot_table_list, chest_loot_table_list):
        """Load the JSON contents of the given mob and chest loot tables.

        Flags each mob entry with 'empty' (some entities drop nothing) and
        records display names for the spoiler log.  Returns (mob_jsons,
        mob_names, chest_jsons, chest_names, mob_entries_with_empty_flag).
        """
        self.loot_table_path = 'loot_tables'
        self.mob_r_loot_tables = []
        self.mob_s_loot_tables = []
        self.chest_r_loot_tables = []
        self.chest_s_loot_tables = []
        self.patched_mob_loot_table_list = []
        for m in mob_loot_table_list:
            with (RANDO_ROOT_PATH / self.loot_table_path / 'entities' / m['file']).open('r') as mlt:
                self.mob_loot_table = json.load(mlt)
            self.mob_r_loot_tables.append(self.mob_loot_table)
            # Single expression replaces the original if/else flag assignment.
            m['empty'] = self.mob_loot_table == {}
            self.mob_s_loot_tables.append(m['name'])
            self.patched_mob_loot_table_list.append(m)
        for c in chest_loot_table_list:
            with (RANDO_ROOT_PATH / self.loot_table_path / 'chests' / c['file']).open('r') as clt:
                self.chest_r_loot_tables.append(json.load(clt))
            self.chest_s_loot_tables.append(c['name'])
        return self.mob_r_loot_tables, self.mob_s_loot_tables, self.chest_r_loot_tables, self.chest_s_loot_tables, self.patched_mob_loot_table_list

    def write_loot_tables(self, mob_loot_tables, mob_s_loot_tables, chest_loot_tables, chest_s_loot_tables):
        """Zip (shuffled) loot-table contents back onto the file names and
        display names recorded by get_loot_tables().

        Returns (patched_mob, spoiler_mob, patched_chest, spoiler_chest) as
        OrderedDicts keyed by file name / display name respectively.
        """
        self.mob_loot_tables_names = [mlt['name'] for mlt in self.mob_loot_tables_list]
        self.mob_loot_tables_files = [mlt['file'] for mlt in self.mob_loot_tables_list]
        self.chest_loot_tables_names = [clt['name'] for clt in self.chest_loot_tables_list]
        self.chest_loot_tables_files = [clt['file'] for clt in self.chest_loot_tables_list]
        self.patched_mob_loot_tables = OrderedDict(zip(self.mob_loot_tables_files, mob_loot_tables))
        self.spoiler_mob_loot_tables = OrderedDict(zip(self.mob_loot_tables_names, mob_s_loot_tables))
        self.patched_chest_loot_tables = OrderedDict(zip(self.chest_loot_tables_files, chest_loot_tables))
        self.spoiler_chest_loot_tables = OrderedDict(zip(self.chest_loot_tables_names, chest_s_loot_tables))
        return self.patched_mob_loot_tables, self.spoiler_mob_loot_tables, self.patched_chest_loot_tables, self.spoiler_chest_loot_tables
import json
import yaml
from typing import OrderedDict
from yaml.loader import FullLoader
from paths import RANDO_ROOT_PATH
class loot_tables:
def get_loot_tables(self, options):
with (RANDO_ROOT_PATH / 'loot_table_categories.yaml').open('r') as loot_tables:
self.loot_table_list = yaml.load(loot_tables, Loader=FullLoader)
self.randomized_mob_loot_table_list = []
self.unrandomized_mob_loot_table_list = []
self.randomized_chest_loot_table_list = []
self.unrandomized_chest_loot_table_list = []
for mob_lt in self.loot_table_list['entities']:
if options['version'] in [str(ver) for ver in mob_lt['versions']]:
if options['randomized_' + mob_lt['type']] == True:
self.randomized_mob_loot_table_list.append(mob_lt)
else:
self.unrandomized_mob_loot_table_list.append(mob_lt)
else:
continue
for chest_lt in self.loot_table_list['chests']:
if options['version'] in [str(ver) for ver in chest_lt['versions']]:
if options['randomized_' + chest_lt['type'] + '_chests'] == True:
self.randomized_chest_loot_table_list.append(chest_lt)
else:
self.unrandomized_chest_loot_table_list.append(chest_lt)
else:
continue
self.mob_loot_tables_list = self.randomized_mob_loot_table_list + self.unrandomized_mob_loot_table_list
self.chest_loot_tables_list = self.randomized_chest_loot_table_list + self.unrandomized_chest_loot_table_list
return self.randomized_mob_loot_table_list, self.unrandomized_mob_loot_table_list, self.randomized_chest_loot_table_list, self.unrandomized_chest_loot_table_list
def read_loot_tables(self, mob_loot_table_list, chest_loot_table_list):
self.loot_table_path = 'loot_tables'
self.mob_r_loot_tables = []
self.mob_s_loot_tables = []
self.chest_r_loot_tables = []
self.chest_s_loot_tables = []
self.patched_mob_loot_table_list = []
for m in mob_loot_table_list:
with (RANDO_ROOT_PATH / self.loot_table_path / 'entities' / m['file']).open('r') as mlt:
self.mob_loot_table = json.load(mlt)
self.mob_r_loot_tables.append(self.mob_loot_table)
if self.mob_loot_table == {}:
m['empty'] = True
else:
m['empty'] = False
self.mob_s_loot_tables.append(m['name'])
self.patched_mob_loot_table_list.append(m)
for c in chest_loot_table_list:
with (RANDO_ROOT_PATH / self.loot_table_path / 'chests' / c['file']).open('r') as clt:
self.chest_r_loot_tables.append(json.load(clt))
self.chest_s_loot_tables.append(c['name'])
return self.mob_r_loot_tables, self.mob_s_loot_tables, self.chest_r_loot_tables, self.chest_s_loot_tables, self.patched_mob_loot_table_list
def write_loot_tables(self, mob_loot_tables, mob_s_loot_tables, chest_loot_tables, chest_s_loot_tables):
self.mob_loot_tables_names = []
self.mob_loot_tables_files = []
self.chest_loot_tables_names = []
self.chest_loot_tables_files = []
for mlt in self.mob_loot_tables_list:
self.mob_loot_tables_names.append(mlt['name'])
self.mob_loot_tables_files.append(mlt['file'])
for clt in self.chest_loot_tables_list:
self.chest_loot_tables_names.append(clt['name'])
self.chest_loot_tables_files.append(clt['file'])
self.patched_mob_loot_tables = OrderedDict(zip(self.mob_loot_tables_files, mob_loot_tables))
self.spoiler_mob_loot_tables = OrderedDict(zip(self.mob_loot_tables_names, mob_s_loot_tables))
self.patched_chest_loot_tables = OrderedDict(zip(self.chest_loot_tables_files, chest_loot_tables))
self.spoiler_chest_loot_tables = OrderedDict(zip(self.chest_loot_tables_names, chest_s_loot_tables))
return self.patched_mob_loot_tables, self.spoiler_mob_loot_tables, self.patched_chest_loot_tables, self.spoiler_chest_loot_tables | true | true |
f726ace460929f064637dcdfe1b9260b82a5a76e | 1,618 | py | Python | examples/ad_manager/v201802/publisher_query_language_service/get_line_items_named_like.py | khanhnhk/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | 1 | 2021-12-30T15:21:42.000Z | 2021-12-30T15:21:42.000Z | examples/ad_manager/v201802/publisher_query_language_service/get_line_items_named_like.py | benlistyg/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201802/publisher_query_language_service/get_line_items_named_like.py | benlistyg/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example fetches line items from the pql table with a LIKE clause."""
import tempfile
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
  """Fetch line items whose name matches 'line item%' and save them to a CSV.

  Args:
    client: an initialized AdManagerClient.
  """
  # Initialize a report downloader for the target API version.
  report_downloader = client.GetDataDownloader(version='v201802')

  # delete=False keeps the temp file around so the caller can inspect it.
  with tempfile.NamedTemporaryFile(
      prefix='line_items_',
      suffix='.csv', mode='w', delete=False) as line_items_file:
    line_items_pql_query = ("SELECT Id, Name, Status FROM Line_Item "
                            "WHERE Name LIKE 'line item%' "
                            "ORDER BY Id ASC")
    # Downloads the response from PQL select statement to the specified file
    report_downloader.DownloadPqlResultToCsv(
        line_items_pql_query, line_items_file)

  # Bug fix: the Python-2-only `print` statement is a SyntaxError under
  # Python 3; this call form behaves identically on both interpreters.
  print('Saved line items to... %s' % line_items_file.name)
if __name__ == '__main__':
  # Initialize client object from the local googleads storage/config file.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client)
| 33.020408 | 76 | 0.725587 |
"""This example fetches line items from the pql table with a LIKE clause."""
import tempfile
from googleads import ad_manager
def main(client):
report_downloader = client.GetDataDownloader(version='v201802')
with tempfile.NamedTemporaryFile(
prefix='line_items_',
suffix='.csv', mode='w', delete=False) as line_items_file:
line_items_pql_query = ("SELECT Id, Name, Status FROM Line_Item "
"WHERE Name LIKE 'line item%' "
"ORDER BY Id ASC")
report_downloader.DownloadPqlResultToCsv(
line_items_pql_query, line_items_file)
print 'Saved line items to... %s' % line_items_file.name
if __name__ == '__main__':
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| false | true |
f726ad04313fae09750869fe143024d0fb1c7b02 | 1,794 | py | Python | release/stubs.min/System/Drawing/__init___parts/CopyPixelOperation.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/System/Drawing/__init___parts/CopyPixelOperation.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/System/Drawing/__init___parts/CopyPixelOperation.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | class CopyPixelOperation(Enum,IComparable,IFormattable,IConvertible):
 """
 Determines how the source color in a copy pixel operation is combined with the destination color to result in a final color.
 enum CopyPixelOperation,values: Blackness (66),CaptureBlt (1073741824),DestinationInvert (5570569),MergeCopy (12583114),MergePaint (12255782),NoMirrorBitmap (-2147483648),NotSourceCopy (3342344),NotSourceErase (1114278),PatCopy (15728673),PatInvert (5898313),PatPaint (16452105),SourceAnd (8913094),SourceCopy (13369376),SourceErase (4457256),SourceInvert (6684742),SourcePaint (15597702),Whiteness (16711778)
 """
 # NOTE: auto-generated IronPython stub for System.Drawing.CopyPixelOperation.
 # Method bodies are placeholders; the .NET runtime supplies real behavior.
 def Instance(self):
  """ This function has been arbitrarily put into the stubs"""
  return CopyPixelOperation()
 def __eq__(self,*args):
  """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
  pass
 def __format__(self,*args):
  """ __format__(formattable: IFormattable,format: str) -> str """
  pass
 def __ge__(self,*args):
  pass
 def __gt__(self,*args):
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __le__(self,*args):
  pass
 def __lt__(self,*args):
  pass
 def __ne__(self,*args):
  pass
 def __reduce_ex__(self,*args):
  pass
 def __str__(self,*args):
  pass
 # Enum members: None in the stub; actual raster-op values are listed in the
 # class docstring above and populated by the CLR at import time.
 Blackness=None
 CaptureBlt=None
 DestinationInvert=None
 MergeCopy=None
 MergePaint=None
 NoMirrorBitmap=None
 NotSourceCopy=None
 NotSourceErase=None
 PatCopy=None
 PatInvert=None
 PatPaint=None
 SourceAnd=None
 SourceCopy=None
 SourceErase=None
 SourceInvert=None
 SourcePaint=None
 value__=None
 Whiteness=None
| 33.849057 | 411 | 0.726867 | class CopyPixelOperation(Enum,IComparable,IFormattable,IConvertible):
return CopyPixelOperation()
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
""" __format__(formattable: IFormattable,format: str) -> str """
pass
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Blackness=None
CaptureBlt=None
DestinationInvert=None
MergeCopy=None
MergePaint=None
NoMirrorBitmap=None
NotSourceCopy=None
NotSourceErase=None
PatCopy=None
PatInvert=None
PatPaint=None
SourceAnd=None
SourceCopy=None
SourceErase=None
SourceInvert=None
SourcePaint=None
value__=None
Whiteness=None
| true | true |
f726ad8c7093593009fdde16473a6c6d5e036bd8 | 11,476 | py | Python | python/GUI/__main__.py | andreehultgren/soduko | aae2d174e417d00ed60206f4567e554d25aa4311 | [
"MIT"
] | null | null | null | python/GUI/__main__.py | andreehultgren/soduko | aae2d174e417d00ed60206f4567e554d25aa4311 | [
"MIT"
] | null | null | null | python/GUI/__main__.py | andreehultgren/soduko | aae2d174e417d00ed60206f4567e554d25aa4311 | [
"MIT"
] | null | null | null | import pygame
from random import sample, randint, random
from tabulate import tabulate
# Display and gameplay settings; colours are RGB tuples, sizes are pixels.
config = {
    "cell_width" : 50,                      # cell width in pixels
    "cell_height" : 50,                     # cell height in pixels
    "cell_color" : (235,235,235),           # default cell background
    "cell_color_hover" : (220,220,255),     # cell under the mouse cursor
    "cell_color_locked" : (255,220,220),    # fixed puzzle clue clicked by the player
    "cell_color_editing" : (220,255,220),   # editable cell currently selected
    "cell_color_wrong" : (255,120,120),     # cell whose entry broke validity
    "cell_padding" : 3,                     # gap between cells (doubled at 3x3 borders)
    "background" : (0,0,0),                 # window background behind the grid
    "color_number" : (0,0,0),               # digit colour
    "window_name" : "Soduko!",              # window caption
    "difficulty" : 0.5,                     # fraction of cells blanked (0..1)
    }
class Game(object):
    """Sudoku game controller.

    Owns the pygame window, clock and fonts, the 9x9 board of Cell
    objects, the win/running state flags and a recursive backtracking
    solver (triggered with the space bar).
    """

    def __init__(self, config):
        """Set up pygame, build the board and initialise game state.

        :param config: dict of display/colour/difficulty settings.
        """
        self.config = config
        # Initialise pygame before any display/font calls.
        pygame.init()
        # Configure the window, frame clock and fonts from the config.
        self.configure_window()
        self.configure_clock()
        self.configure_fonts()
        # Generate the sudoku board (a full solution with some cells blanked).
        self.generate_board()
        # Game state flags.
        self.win = False
        self.running = True

    def configure_fonts(self):
        """Create fonts: big for placed digits, small for pencil marks."""
        # Read from self.config (the constructor argument) rather than the
        # module-level global, so the instance honours what it was given.
        font_big = int(self.config['cell_height'] * 0.8)
        font_small = int(self.config['cell_height'] * 0.4)
        self.font = pygame.font.SysFont('comicsansms', font_big)
        # Bold variant distinguishes the puzzle's fixed starting digits.
        self.font_initial = pygame.font.SysFont('comicsansms', font_big, True)
        self.font_predicted = pygame.font.SysFont('comicsansms', font_small)

    def configure_clock(self):
        """Create the frame-rate limiting clock."""
        self.clock = pygame.time.Clock()

    def configure_window(self):
        """Open the display window sized to fit 9 cells plus all padding."""
        pygame.display.set_caption(self.config['window_name'])
        # 9 cells, with padding between cells and doubled padding at the
        # 3x3 block borders (14 padding units per axis in total).
        window_width = 9 * self.config['cell_width'] + 14 * self.config['cell_padding']
        window_height = 9 * self.config['cell_height'] + 14 * self.config['cell_padding']
        self.view = pygame.display.set_mode((window_width, window_height))

    def check_mouse(self):
        """Cache the current mouse position and left-button state."""
        self.pos = pygame.mouse.get_pos()
        self.pressed, _, _ = pygame.mouse.get_pressed()

    def draw_board(self):
        """Draw every cell of the board."""
        for row in self.cell_board:
            for cell in row:
                cell.draw()

    def draw_background(self):
        """Fill the window with the background colour."""
        self.view.fill(self.config['background'])

    def generate_board(self):
        """Build the cell board from a full solution, blanking cells at random.

        Each cell keeps its solved value with probability 1 - difficulty,
        otherwise it starts empty (value 0).
        """
        solution = self.generate_board_full()
        difficulty = self.config['difficulty']
        self.cell_board = [
            [Cell(self, i, j, value) if random() > difficulty else Cell(self, i, j, 0)
             for j, value in enumerate(row)]
            for i, row in enumerate(solution)
        ]

    def generate_board_full(self):
        """Return a random, fully-solved 9x9 sudoku grid.

        Adapted from https://stackoverflow.com/a/56581709: shuffle the
        rows, columns and digits of a valid baseline pattern.
        """
        base = 3

        # Pattern for a baseline valid solution.
        def pattern(r, c):
            return (base * (r % base) + r // base + c) % (base ** 2)

        # Randomise rows, columns and numbers (of the valid base pattern).
        def shuffle(s):
            return sample(s, len(s))

        rBase = range(base)
        rows = [g * base + r for g in shuffle(rBase) for r in shuffle(rBase)]
        cols = [g * base + c for g in shuffle(rBase) for c in shuffle(rBase)]
        nums = shuffle(range(1, base * base + 1))
        # Produce the board using the randomised baseline pattern.
        return [[nums[pattern(r, c)] for c in cols] for r in rows]

    def check_keyboard(self):
        """Handle board-wide keyboard commands: space bar auto-solves."""
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_SPACE]:
            self.solve(self.cell_board)
            self.check_for_win()

    def check_for_win(self):
        """Set self.win: the board is valid and every cell is filled in."""
        validity = self.check_validity(self.cell_board)
        all_entry = True
        for row in self.cell_board:
            for cell in row:
                # 0 marks an empty cell.  Compare with ==, not `is`:
                # identity tests on int literals rely on CPython's small-int
                # cache and raise a SyntaxWarning on Python 3.8+.
                if cell.value == 0:
                    all_entry = False
        # Store a real bool (previously an int product 0/1 of two bools).
        self.win = validity and all_entry

    def check_validity(self, board):
        """Return True when no row, column or 3x3 square repeats a digit.

        Empty cells (value 0) are ignored, so a partially filled board
        can still be valid.

        :param board: 9x9 nested list of objects exposing a ``value`` attr.
        """
        combinations = []
        squares = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        valid = True
        # Flat indices (0..80) of every row and every column.
        for i in range(9):
            combinations.append([9 * i + k for k in range(9)])
            combinations.append([i + k * 9 for k in range(9)])
        # Flat indices of every 3x3 square.
        for row in squares:
            for col in squares:
                combinations.append([a * 9 + b for b in col for a in row])
        # Check every group for duplicated digits.
        for combination in combinations:
            # Count the occurrences of each digit 1..9 in this group.
            counter = [0 for _ in range(9)]
            for position in combination:
                row, col = position // 9, position % 9
                value = board[row][col].value
                if value != 0:
                    counter[value - 1] += 1
            # Any digit appearing more than once makes the board invalid.
            for count in counter:
                if count > 1:
                    valid = False
        return valid

    def solve(self, board, position=0):
        """Recursively backtrack-solve ``board`` in place, animating progress.

        :param position: flat cell index 0..80 currently being filled.
        :returns: tuple (solved, board).
        """
        # Redraw at every step so the solving process is visible.
        self.draw_background()
        self.draw_board()
        pygame.display.update()
        # Effectively uncap the frame rate while solving.
        self.clock.tick(10000)
        row, col = position // 9, position % 9
        # Past the last cell: the whole board has been filled validly.
        if position >= 81:
            return True, board
        # Fixed starting cells are skipped, never searched.
        if board[row][col].initial_value != 0:
            return self.solve(board, position + 1)
        # Try each candidate digit in this cell.
        for value in range(1, 10):
            board[row][col].value = value
            if self.check_validity(board):
                sol_found, board = self.solve(board, position + 1)
                if sol_found:
                    return True, board
        # Dead end: reset the cell and backtrack.
        board[row][col].value = 0
        return False, board
class Cell(object):
    """One square of the sudoku grid; draws itself and handles its own input."""
    def __init__(self, parent, row, col, value):
        # value: current digit (0 = empty).  initial_value != 0 marks a
        # fixed puzzle clue that the player cannot edit.
        self.value = value
        self.row = row
        self.col = col
        self.initial_value = value
        # parent: the Game instance (provides config, fonts, view, mouse state).
        self.parent = parent
        self.clicked = False
        self.hover = False
        # Pencilled-in candidate digits, rendered in the small font.
        self.predicted = []
        # None = unchecked; False = this cell's entry broke board validity.
        self.correct = None
    def draw(self):
        # Draw once in the default colour just to obtain the rect used for
        # mouse hit-testing, then draw again in the state-dependent colour.
        # The double draw is intentional; pick_color needs the rect first.
        square = self.draw_cell(self.parent.config['cell_color'])
        color = self.pick_color(square)
        square = self.draw_cell(self.parent.config[color])
        self.add_text(square)
    def pick_color(self, square):
        # Return the config colour key for this cell, updating hover/clicked
        # state as a side effect and dispatching keyboard input when the
        # cell is selected and editable.
        color = 'cell_color'
        # A previously flagged invalid entry keeps the cell red.
        if self.correct==False:
            color = 'cell_color_wrong'
        # Check hover.
        if square.collidepoint(self.parent.pos):
            self.hover = True
            color = 'cell_color_hover'
        else:
            self.hover = False
            self.clicked= False
        # Check click.
        if self.hover and self.parent.pressed:
            self.clicked = True
        # While clicked: locked colour for fixed clues, otherwise editing
        # colour plus keyboard handling.
        if self.clicked and self.hover:
            if self.initial_value!=0:
                color = 'cell_color_locked'
            else:
                color = 'cell_color_editing'
                self.listen_for_number()
        return color
    def add_text(self, square):
        # Render either the placed digit (big font) or the concatenated
        # pencil marks (small font), centred on the cell.
        if self.value != 0:
            cell_data = str(self.value)
        else:
            cell_data = ''
            for digit in self.predicted:
                cell_data += str(digit)
        if self.initial_value!=0:
            # Fixed clues use the bold font.
            text = self.parent.font_initial.render(cell_data, True, self.parent.config['color_number'])
        elif len(self.predicted)>0:
            text = self.parent.font_predicted.render(cell_data, True, self.parent.config['color_number'])
        else:
            text = self.parent.font.render(cell_data, True, self.parent.config['color_number'])
        # Blit the text centred in the square.
        textRect = text.get_rect()
        textRect.center = square.center
        self.parent.view.blit(text, textRect)
    def draw_cell(self, color):
        # Compute the pixel position: padding between cells, with an extra
        # padding unit at each 3x3 block boundary (the //3 term).
        x_pos = self.col*(self.parent.config['cell_width']+self.parent.config['cell_padding'])+(1+self.col//3)*self.parent.config['cell_padding']
        y_pos = self.row*(self.parent.config['cell_height']+self.parent.config['cell_padding'])+(1+self.row//3)*self.parent.config['cell_padding']
        return pygame.draw.rect(self.parent.view, color, (x_pos, y_pos, self.parent.config['cell_width'], self.parent.config['cell_height']))
    def listen_for_number(self):
        # Poll the keyboard: digits add pencil marks, delete/backspace pops
        # the last mark and clears the cell, enter commits a lone mark.
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_1] or pressed[pygame.K_KP1]: self.predict(1)
        if pressed[pygame.K_2] or pressed[pygame.K_KP2]: self.predict(2)
        if pressed[pygame.K_3] or pressed[pygame.K_KP3]: self.predict(3)
        if pressed[pygame.K_4] or pressed[pygame.K_KP4]: self.predict(4)
        if pressed[pygame.K_5] or pressed[pygame.K_KP5]: self.predict(5)
        if pressed[pygame.K_6] or pressed[pygame.K_KP6]: self.predict(6)
        if pressed[pygame.K_7] or pressed[pygame.K_KP7]: self.predict(7)
        if pressed[pygame.K_8] or pressed[pygame.K_KP8]: self.predict(8)
        if pressed[pygame.K_9] or pressed[pygame.K_KP9]: self.predict(9)
        if pressed[pygame.K_DELETE] or pressed[pygame.K_BACKSPACE]:
            try:
                # Pop the most recent pencil mark; no-op when empty.
                self.predicted.remove(self.predicted[-1])
            except:
                pass
            self.set_number(0)
        if pressed[pygame.K_RETURN] or pressed[pygame.K_KP_ENTER]:
            # Enter commits only when exactly one candidate is pencilled in.
            if len(self.predicted) == 1:
                self.set_number(self.predicted[0])
                self.predicted=[]
            self.parent.check_for_win()
    def set_number(self, number):
        # Commit a digit into the cell and flag validity.
        self.parent.pressed = True
        self.correct = None
        # NOTE(review): validity is computed BEFORE `number` is written, so
        # `correct` reflects the board prior to this change -- looks like an
        # off-by-one-move; confirm whether this is intentional.
        valid_input = self.parent.check_validity(self.parent.cell_board)
        if not valid_input:
            self.correct = False
        self.value = number
    def predict(self, number):
        # Add a pencil-mark candidate; only allowed while the cell is empty.
        self.parent.pressed = True
        if self.value == 0:
            if number not in self.predicted:
                self.predicted.append(number)
# Create the game (opens the pygame window) and run the main loop.
game = Game(config)
while game.running:
    # Gather mouse and keyboard state for this frame.
    game.check_mouse()
    game.check_keyboard()
    if not game.win:
        # Redraw the whole board every frame.
        game.draw_background()
        game.draw_board()
    else:
        # Puzzle solved: report and quit immediately.
        print("WIN")
        print(game.win)
        exit()
    # Flip the updated frame to the screen.
    pygame.display.update()
    # Limit framerate to 15 fps.
    game.clock.tick(15)
    # The window close button ends the loop.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game.running = False
| 36.08805 | 174 | 0.548013 | import pygame
from random import sample, randint, random
from tabulate import tabulate
config = {
"cell_width" : 50,
"cell_height" : 50,
"cell_color" : (235,235,235),
"cell_color_hover" : (220,220,255),
"cell_color_locked" : (255,220,220),
"cell_color_editing" : (220,255,220),
"cell_color_wrong" : (255,120,120),
"cell_padding" : 3,
"background" : (0,0,0),
"color_number" : (0,0,0),
"window_name" : "Soduko!",
"difficulty" : 0.5,
}
class Game(object):
def __init__(self, config):
self.config = config
pygame.init()
self.configure_window()
self.configure_clock()
self.configure_fonts()
self.generate_board()
self.win = False
self.running = True
def configure_fonts(self):
font_big = int(config['cell_height']*0.8)
font_small = int(config['cell_height']*0.4)
self.font = pygame.font.SysFont('comicsansms', font_big)
self.font_initial = pygame.font.SysFont('comicsansms', font_big, True)
self.font_predicted = pygame.font.SysFont('comicsansms', font_small)
def configure_clock(self):
self.clock = pygame.time.Clock()
def configure_window(self):
pygame.display.set_caption(config['window_name'])
window_width = 9*config['cell_width' ]+14*config['cell_padding']
window_height = 9*config['cell_height']+14*config['cell_padding']
self.view = pygame.display.set_mode((window_width, window_height))
def check_mouse(self):
self.pos = pygame.mouse.get_pos()
self.pressed,_,_= pygame.mouse.get_pressed()
def draw_board(self):
for row in self.cell_board:
for cell in row:
cell.draw()
def draw_background(self):
self.view.fill(self.config['background'])
def generate_board(self):
solution = self.generate_board_full()
self.cell_board = [[Cell(self,i,j,value) if random()>config['difficulty'] else Cell(self,i,j,0) for j,value in enumerate(row)] for i,row in enumerate(solution)]
def generate_board_full(self):
= 3
def pattern(r,c): return (base*(r%base)+r//base+c)%(base**2)
def shuffle(s): return sample(s,len(s))
rBase = range(base)
rows = [ g*base + r for g in shuffle(rBase) for r in shuffle(rBase) ]
cols = [ g*base + c for g in shuffle(rBase) for c in shuffle(rBase) ]
nums = shuffle(range(1,base*base+1))
board = [ [nums[pattern(r,c)] for c in cols] for r in rows ]
return board
def check_keyboard(self):
pressed = pygame.key.get_pressed()
if pressed[pygame.K_SPACE]:
self.solve(self.cell_board)
self.check_for_win()
def check_for_win(self):
validity = self.check_validity(self.cell_board)
all_entry = True
for row in self.cell_board:
for cell in row:
if cell.value is 0: all_entry = False
self.win = validity*all_entry
def check_validity(self, board):
combinations = []
squares = [[0,1,2], [3,4,5], [6,7,8]]
valid = True
for i in range(9):
combinations.append([ 9*i+k for k in range(9)])
combinations.append([ i+k*9 for k in range(9)])
for row in squares:
for col in squares:
combinations.append([a*9+b for b in col for a in row])
for combination in combinations:
counter = [0 for _ in range(9)]
for position in combination:
row, col = position//9, position%9
value = board[row][col].value
if value is not 0:
counter[value-1] += 1
for count in counter:
if count >1:
valid=False
return valid
def solve(self, board, position=0):
self.draw_background()
self.draw_board()
pygame.display.update()
self.clock.tick(10000)
row, col = position//9, position%9
sol_found = False
if position>=81:
return True, board
if board[row][col].initial_value != 0:
sol_found, board = self.solve(board, position+1)
if sol_found:
return True, board
else:
return False, board
for value in range(1,10):
board[row][col].value = value
valid_solution = self.check_validity(board)
if valid_solution:
sol_found, board = self.solve(board, position+1)
if sol_found:
return True, board
board[row][col].value = 0
return False, board
class Cell(object):
def __init__(self, parent, row, col, value):
self.value = value
self.row = row
self.col = col
self.initial_value = value
self.parent = parent
self.clicked = False
self.hover = False
self.predicted = []
self.correct = None
def draw(self):
square = self.draw_cell(self.parent.config['cell_color'])
color = self.pick_color(square)
square = self.draw_cell(self.parent.config[color])
self.add_text(square)
def pick_color(self, square):
color = 'cell_color'
if self.correct==False:
color = 'cell_color_wrong'
#Check hover
if square.collidepoint(self.parent.pos):
self.hover = True
color = 'cell_color_hover'
else:
self.hover = False
self.clicked= False
#Check click
if self.hover and self.parent.pressed:
self.clicked = True
#Update value if clicked
if self.clicked and self.hover:
if self.initial_value!=0:
color = 'cell_color_locked'
else:
color = 'cell_color_editing'
self.listen_for_number()
return color
def add_text(self, square):
#Stringify the value. Don't show a number
if self.value != 0:
cell_data = str(self.value)
else:
cell_data = ''
for digit in self.predicted:
cell_data += str(digit)
if self.initial_value!=0:
text = self.parent.font_initial.render(cell_data, True, self.parent.config['color_number'])
elif len(self.predicted)>0:
text = self.parent.font_predicted.render(cell_data, True, self.parent.config['color_number'])
else:
text = self.parent.font.render(cell_data, True, self.parent.config['color_number'])
textRect = text.get_rect()
textRect.center = square.center
self.parent.view.blit(text, textRect)
def draw_cell(self, color):
x_pos = self.col*(self.parent.config['cell_width']+self.parent.config['cell_padding'])+(1+self.col//3)*self.parent.config['cell_padding']
y_pos = self.row*(self.parent.config['cell_height']+self.parent.config['cell_padding'])+(1+self.row//3)*self.parent.config['cell_padding']
return pygame.draw.rect(self.parent.view, color, (x_pos, y_pos, self.parent.config['cell_width'], self.parent.config['cell_height']))
def listen_for_number(self):
pressed = pygame.key.get_pressed()
if pressed[pygame.K_1] or pressed[pygame.K_KP1]: self.predict(1)
if pressed[pygame.K_2] or pressed[pygame.K_KP2]: self.predict(2)
if pressed[pygame.K_3] or pressed[pygame.K_KP3]: self.predict(3)
if pressed[pygame.K_4] or pressed[pygame.K_KP4]: self.predict(4)
if pressed[pygame.K_5] or pressed[pygame.K_KP5]: self.predict(5)
if pressed[pygame.K_6] or pressed[pygame.K_KP6]: self.predict(6)
if pressed[pygame.K_7] or pressed[pygame.K_KP7]: self.predict(7)
if pressed[pygame.K_8] or pressed[pygame.K_KP8]: self.predict(8)
if pressed[pygame.K_9] or pressed[pygame.K_KP9]: self.predict(9)
if pressed[pygame.K_DELETE] or pressed[pygame.K_BACKSPACE]:
try:
self.predicted.remove(self.predicted[-1])
except:
pass
self.set_number(0)
if pressed[pygame.K_RETURN] or pressed[pygame.K_KP_ENTER]:
if len(self.predicted) == 1:
self.set_number(self.predicted[0])
self.predicted=[]
self.parent.check_for_win()
def set_number(self, number):
self.parent.pressed = True
self.correct = None
valid_input = self.parent.check_validity(self.parent.cell_board)
if not valid_input:
self.correct = False
self.value = number
def predict(self, number):
self.parent.pressed = True
if self.value == 0:
if number not in self.predicted:
self.predicted.append(number)
game = Game(config)
while game.running:
game.check_mouse()
game.check_keyboard()
if not game.win:
game.draw_background()
game.draw_board()
else:
print("WIN")
print(game.win)
exit()
pygame.display.update()
game.clock.tick(15)
for event in pygame.event.get():
if event.type == pygame.QUIT:
game.running = False
| true | true |
f726af04dc7785db1b54ef4a6b8f7b5c33ebd894 | 878 | py | Python | server/fadzmaq.py | lachierussell/FadZmaq | deb89c35df05603552ce95627ac8400c6788fbcb | [
"BSD-2-Clause"
] | 2 | 2019-09-02T06:56:46.000Z | 2019-09-15T08:43:54.000Z | server/fadzmaq.py | lachierussell/FadZmaq | deb89c35df05603552ce95627ac8400c6788fbcb | [
"BSD-2-Clause"
] | 11 | 2019-08-27T19:08:24.000Z | 2019-10-18T01:45:54.000Z | server/fadzmaq.py | lachierussell/FadZmaq | deb89c35df05603552ce95627ac8400c6788fbcb | [
"BSD-2-Clause"
] | 1 | 2019-10-25T05:42:48.000Z | 2019-10-25T05:42:48.000Z | # @file
# The application entry point. Run this file to use the FadZmaq Server.
#
# FadZmaq Project
# Professional Computing. Semester 2 2019
#
# Copyright FadZmaq © 2019 All rights reserved.
# @author Lachlan Russell 22414249@student.uwa.edu.au
# @author Jordan Russell jordanrussell@live.com
# @author Thiren Naidoo 22257963@student.uwa.edu.au
# @author Beining Chen 22384298@student.uwa.edu.au
# entry point for the api
from fadzmaq import create_app
import fadzmaq
import firebase_admin
from sqlalchemy import create_engine
# Build the Flask application from the package factory.
app = create_app()
# Authenticate the Firebase Admin SDK with the service-account certificate
# whose path is stored in the app config under 'CERT'.
cred = firebase_admin.credentials.Certificate(app.config['CERT'])
fadzmaq.auth_app = firebase_admin.initialize_app(cred)
# Shared SQLAlchemy engine for the whole fadzmaq package.
fadzmaq.engine = create_engine(app.config['DATABASE_URI'])
# only run if we are executing this script, otherwise handled by WSGI
if __name__ == "__main__":
    app.run()
| 30.275862 | 71 | 0.750569 |
from fadzmaq import create_app
import fadzmaq
import firebase_admin
from sqlalchemy import create_engine
app = create_app()
cred = firebase_admin.credentials.Certificate(app.config['CERT'])
fadzmaq.auth_app = firebase_admin.initialize_app(cred)
fadzmaq.engine = create_engine(app.config['DATABASE_URI'])
if __name__ == "__main__":
app.run()
| true | true |
f726b000f7751f551512bc88f402ed4f784b69c2 | 6,428 | py | Python | dev/breeze/src/airflow_breeze/build_image/ci/build_ci_params.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | null | null | null | dev/breeze/src/airflow_breeze/build_image/ci/build_ci_params.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | null | null | null | dev/breeze/src/airflow_breeze/build_image/ci/build_ci_params.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Parameters for Build CI Image."""
import os
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import List, Optional
from airflow_breeze.branch_defaults import AIRFLOW_BRANCH, DEFAULT_AIRFLOW_CONSTRAINTS_BRANCH
from airflow_breeze.global_constants import get_airflow_version
from airflow_breeze.utils.console import console
from airflow_breeze.utils.path_utils import BUILD_CACHE_DIR
@dataclass
class BuildCiParams:
    """
    CI build parameters. Those parameters are used to determine command issued to build CI image.
    """

    upgrade_to_newer_dependencies: str = "false"
    python: str = "3.7"
    airflow_branch: str = AIRFLOW_BRANCH
    build_id: int = 0
    docker_cache: str = "pulled"
    airflow_extras: str = "devel_ci"
    install_providers_from_sources: bool = True
    additional_airflow_extras: str = ""
    additional_python_deps: str = ""
    github_repository: str = "apache/airflow"
    constraints_github_repository: str = "apache/airflow"
    default_constraints_branch: str = DEFAULT_AIRFLOW_CONSTRAINTS_BRANCH
    airflow_constraints: str = "constraints-source-providers"
    airflow_constraints_reference: Optional[str] = "constraints-main"
    airflow_constraints_location: Optional[str] = ""
    airflow_pre_cached_pip_packages: str = "true"
    login_to_github_registry: str = "false"
    github_username: str = ""
    dev_apt_command: str = ""
    dev_apt_deps: str = ""
    image_tag: Optional[str] = None
    github_token: str = ""
    additional_dev_apt_command: str = ""
    additional_dev_apt_deps: str = ""
    additional_dev_apt_env: str = ""
    runtime_apt_command: str = ""
    runtime_apt_deps: str = ""
    additional_runtime_apt_command: str = ""
    additional_runtime_apt_deps: str = ""
    additional_runtime_apt_env: str = ""
    platform: str = f"linux/{os.uname().machine}"
    debian_version: str = "bullseye"
    prepare_buildx_cache: bool = False
    push_image: bool = False
    empty_image: bool = False
    force_build: bool = False
    skip_rebuild_check: bool = False
    answer: Optional[str] = None

    @property
    def the_image_type(self) -> str:
        """Label of this image flavour, used in messages and cache paths."""
        return 'CI'

    @property
    def airflow_base_image_name(self):
        """ghcr.io registry prefix shared by all images of this repository."""
        image = f'ghcr.io/{self.github_repository.lower()}'
        return image

    @property
    def airflow_image_name(self):
        """Construct CI image link (registry path, without a tag)."""
        image = f'{self.airflow_base_image_name}/{self.airflow_branch}/ci/python{self.python}'
        return image

    @property
    def airflow_image_name_with_tag(self):
        """CI image link, with ``:image_tag`` appended when a tag is set."""
        # Reuse airflow_image_name so the two properties can never drift apart
        # (previously the f-string was duplicated here).
        image = self.airflow_image_name
        return image if self.image_tag is None else image + f":{self.image_tag}"

    @property
    def airflow_image_repository(self):
        """GitHub URL of the repository the image is built from."""
        return f'https://github.com/{self.github_repository}'

    @property
    def python_base_image(self):
        """Construct Python Base Image used as the Dockerfile base."""
        return f'python:{self.python}-slim-{self.debian_version}'

    @property
    def airflow_ci_local_manifest_image(self):
        """Construct CI Local Manifest Image"""
        return f'local-airflow-ci-manifest/{self.airflow_branch}/python{self.python}'

    @property
    def airflow_ci_remote_manifest_image(self):
        """Construct CI Remote Manifest Image"""
        return f'{self.airflow_image_name}/{self.airflow_branch}/ci-manifest//python:{self.python}'

    @property
    def airflow_image_date_created(self):
        """Timestamp string stamped onto the image as a label.

        NOTE(review): this is naive local time formatted with a 'Z' (UTC)
        suffix -- consider datetime.now(timezone.utc); kept as-is here.
        """
        now = datetime.now()
        return now.strftime("%Y-%m-%dT%H:%M:%SZ")

    @property
    def airflow_version(self):
        """Airflow version read from the local sources."""
        return get_airflow_version()

    @property
    def docker_cache_directive(self) -> List[str]:
        """Docker build flags implementing the selected cache strategy."""
        if self.docker_cache == "pulled":
            docker_cache_directive = [f"--cache-from={self.airflow_image_name}"]
        elif self.docker_cache == "disabled":
            docker_cache_directive = ["--no-cache"]
        else:
            # Any other value (e.g. "local") uses Docker's default local cache.
            docker_cache_directive = []
        if self.prepare_buildx_cache:
            docker_cache_directive.extend(["--cache-to=type=inline,mode=max", "--push"])
        return docker_cache_directive

    @property
    def extra_docker_build_flags(self) -> List[str]:
        """Extra ``--build-arg`` flags needed only for the CI image."""
        extra_ci_flags = []
        if self.airflow_constraints_location is not None and len(self.airflow_constraints_location) > 0:
            extra_ci_flags.extend(
                ["--build-arg", f"AIRFLOW_CONSTRAINTS_LOCATION={self.airflow_constraints_location}"]
            )
        return extra_ci_flags

    @property
    def md5sum_cache_dir(self) -> Path:
        """Directory caching md5 sums of image inputs between builds."""
        return Path(BUILD_CACHE_DIR, self.airflow_branch, self.python, "CI")

    def print_info(self):
        """Print a short description of the image being built."""
        console.print(f"CI Image: {self.airflow_version} Python: {self.python}.")
# BuildCiParams attribute names always passed to docker build as --build-arg
# for the CI image.
REQUIRED_CI_IMAGE_ARGS = [
    "python_base_image",
    "airflow_version",
    "airflow_branch",
    "airflow_extras",
    "airflow_pre_cached_pip_packages",
    "additional_airflow_extras",
    "additional_python_deps",
    "additional_dev_apt_command",
    "additional_dev_apt_deps",
    "additional_dev_apt_env",
    "additional_runtime_apt_command",
    "additional_runtime_apt_deps",
    "additional_runtime_apt_env",
    "upgrade_to_newer_dependencies",
    "constraints_github_repository",
    "airflow_constraints_reference",
    "airflow_constraints",
    "airflow_image_repository",
    "airflow_image_date_created",
    "build_id",
]
# Attribute names passed as --build-arg only when set (empty by default).
OPTIONAL_CI_IMAGE_ARGS = [
    "dev_apt_command",
    "dev_apt_deps",
    "runtime_apt_command",
    "runtime_apt_deps",
]
| 35.125683 | 104 | 0.706752 |
import os
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import List, Optional
from airflow_breeze.branch_defaults import AIRFLOW_BRANCH, DEFAULT_AIRFLOW_CONSTRAINTS_BRANCH
from airflow_breeze.global_constants import get_airflow_version
from airflow_breeze.utils.console import console
from airflow_breeze.utils.path_utils import BUILD_CACHE_DIR
@dataclass
class BuildCiParams:
upgrade_to_newer_dependencies: str = "false"
python: str = "3.7"
airflow_branch: str = AIRFLOW_BRANCH
build_id: int = 0
docker_cache: str = "pulled"
airflow_extras: str = "devel_ci"
install_providers_from_sources: bool = True
additional_airflow_extras: str = ""
additional_python_deps: str = ""
github_repository: str = "apache/airflow"
constraints_github_repository: str = "apache/airflow"
default_constraints_branch: str = DEFAULT_AIRFLOW_CONSTRAINTS_BRANCH
airflow_constraints: str = "constraints-source-providers"
airflow_constraints_reference: Optional[str] = "constraints-main"
airflow_constraints_location: Optional[str] = ""
airflow_pre_cached_pip_packages: str = "true"
login_to_github_registry: str = "false"
github_username: str = ""
dev_apt_command: str = ""
dev_apt_deps: str = ""
image_tag: Optional[str] = None
github_token: str = ""
additional_dev_apt_command: str = ""
additional_dev_apt_deps: str = ""
additional_dev_apt_env: str = ""
runtime_apt_command: str = ""
runtime_apt_deps: str = ""
additional_runtime_apt_command: str = ""
additional_runtime_apt_deps: str = ""
additional_runtime_apt_env: str = ""
platform: str = f"linux/{os.uname().machine}"
debian_version: str = "bullseye"
prepare_buildx_cache: bool = False
push_image: bool = False
empty_image: bool = False
force_build: bool = False
skip_rebuild_check: bool = False
answer: Optional[str] = None
@property
def the_image_type(self) -> str:
return 'CI'
@property
def airflow_base_image_name(self):
image = f'ghcr.io/{self.github_repository.lower()}'
return image
@property
def airflow_image_name(self):
image = f'{self.airflow_base_image_name}/{self.airflow_branch}/ci/python{self.python}'
return image
@property
def airflow_image_name_with_tag(self):
image = f'{self.airflow_base_image_name}/{self.airflow_branch}/ci/python{self.python}'
return image if self.image_tag is None else image + f":{self.image_tag}"
@property
def airflow_image_repository(self):
return f'https://github.com/{self.github_repository}'
@property
def python_base_image(self):
return f'python:{self.python}-slim-{self.debian_version}'
@property
def airflow_ci_local_manifest_image(self):
return f'local-airflow-ci-manifest/{self.airflow_branch}/python{self.python}'
@property
def airflow_ci_remote_manifest_image(self):
return f'{self.airflow_image_name}/{self.airflow_branch}/ci-manifest//python:{self.python}'
@property
def airflow_image_date_created(self):
now = datetime.now()
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
@property
def airflow_version(self):
return get_airflow_version()
@property
def docker_cache_directive(self) -> List[str]:
docker_cache_directive = []
if self.docker_cache == "pulled":
docker_cache_directive.append(f"--cache-from={self.airflow_image_name}")
elif self.docker_cache == "disabled":
docker_cache_directive.append("--no-cache")
else:
docker_cache_directive = []
if self.prepare_buildx_cache:
docker_cache_directive.extend(["--cache-to=type=inline,mode=max", "--push"])
return docker_cache_directive
@property
def extra_docker_build_flags(self) -> List[str]:
extra_ci_flags = []
if self.airflow_constraints_location is not None and len(self.airflow_constraints_location) > 0:
extra_ci_flags.extend(
["--build-arg", f"AIRFLOW_CONSTRAINTS_LOCATION={self.airflow_constraints_location}"]
)
return extra_ci_flags
@property
def md5sum_cache_dir(self) -> Path:
return Path(BUILD_CACHE_DIR, self.airflow_branch, self.python, "CI")
def print_info(self):
console.print(f"CI Image: {self.airflow_version} Python: {self.python}.")
REQUIRED_CI_IMAGE_ARGS = [
"python_base_image",
"airflow_version",
"airflow_branch",
"airflow_extras",
"airflow_pre_cached_pip_packages",
"additional_airflow_extras",
"additional_python_deps",
"additional_dev_apt_command",
"additional_dev_apt_deps",
"additional_dev_apt_env",
"additional_runtime_apt_command",
"additional_runtime_apt_deps",
"additional_runtime_apt_env",
"upgrade_to_newer_dependencies",
"constraints_github_repository",
"airflow_constraints_reference",
"airflow_constraints",
"airflow_image_repository",
"airflow_image_date_created",
"build_id",
]
OPTIONAL_CI_IMAGE_ARGS = [
"dev_apt_command",
"dev_apt_deps",
"runtime_apt_command",
"runtime_apt_deps",
]
| true | true |
f726b031d40348c933768960ba80fab387456438 | 356 | py | Python | src/grasshopper_combat.py | hcodydibble/code-katas | f02599a76ac5c3719b1e3831208126eb4b72e98d | [
"MIT"
] | null | null | null | src/grasshopper_combat.py | hcodydibble/code-katas | f02599a76ac5c3719b1e3831208126eb4b72e98d | [
"MIT"
] | null | null | null | src/grasshopper_combat.py | hcodydibble/code-katas | f02599a76ac5c3719b1e3831208126eb4b72e98d | [
"MIT"
] | null | null | null | """Grasshopper - Terminal game combat function - Return remaining health after
taking damage.
# 1 Best Practices solution by ZozoFouchtra and others
def combat(health, damage):
return max(0, health-damage)
"""
def combat(health, damage):
    """Return the health left after taking *damage*, floored at zero."""
    return max(0, health - damage)
| 25.428571 | 78 | 0.724719 |
def combat(health, damage):
return 0 if health - damage < 0 else health - damage
| true | true |
f726b1139d55db10b37ce1d0847019a581954a25 | 4,460 | py | Python | sdks/python/apache_beam/io/gcp/bigquery_avro_tools.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 2 | 2017-12-19T18:34:54.000Z | 2019-05-14T21:50:06.000Z | sdks/python/apache_beam/io/gcp/bigquery_avro_tools.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 80 | 2020-01-16T09:55:09.000Z | 2020-10-03T13:43:07.000Z | sdks/python/apache_beam/io/gcp/bigquery_avro_tools.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 1 | 2020-04-29T20:09:40.000Z | 2020-04-29T20:09:40.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tools used tool work with Avro files in the context of BigQuery.
Classes, constants and functions in this file are experimental and have no
backwards compatibility guarantees.
NOTHING IN THIS FILE HAS BACKWARDS COMPATIBILITY GUARANTEES.
"""
from __future__ import absolute_import
from __future__ import division
# BigQuery types as listed in
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
# with aliases (RECORD, BOOLEAN, FLOAT, INTEGER) as defined in
# https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/java/latest/com/google/api/services/bigquery/model/TableFieldSchema.html#setType-java.lang.String-
BIG_QUERY_TO_AVRO_TYPES = {
    "STRUCT": "record",
    "RECORD": "record",
    "STRING": "string",
    "BOOL": "boolean",
    "BOOLEAN": "boolean",
    "BYTES": "bytes",
    "FLOAT64": "double",
    "FLOAT": "double",
    "INT64": "long",
    "INTEGER": "long",
    "TIME": {
        "type": "long",
        "logicalType": "time-micros",
    },
    "TIMESTAMP": {
        "type": "long",
        "logicalType": "timestamp-micros",
    },
    "DATE": {
        "type": "int",
        "logicalType": "date",
    },
    "DATETIME": "string",
    "NUMERIC": {
        "type": "bytes",
        "logicalType": "decimal",
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
        "precision": 38,
        "scale": 9,
    },
    "GEOGRAPHY": "string",
}


def get_record_schema_from_dict_table_schema(
    schema_name, table_schema, namespace="apache_beam.io.gcp.bigquery"):
  # type: (Text, Dict[Text, Any], Text) -> Dict[Text, Any]
  """Convert a table schema into an Avro schema.

  Args:
    schema_name (Text): The name of the record.
    table_schema (Dict[Text, Any]): A BigQuery table schema in dict form;
      must contain a "fields" list.
    namespace (Text): The namespace of the Avro schema.

  Returns:
    Dict[Text, Any]: The schema as an Avro RecordSchema.
  """
  avro_fields = [
      table_field_to_avro_field(field, ".".join((namespace, schema_name)))
      for field in table_schema["fields"]
  ]
  return {
      "type": "record",
      "name": schema_name,
      "fields": avro_fields,
      "doc": "Translated Avro Schema for {}".format(schema_name),
      "namespace": namespace,
  }


def table_field_to_avro_field(table_field, namespace):
  # type: (Dict[Text, Any], str) -> Dict[Text, Any]
  """Convert a BigQuery field to an avro field.

  Args:
    table_field (Dict[Text, Any]): A BigQuery field in dict form.
    namespace (str): Namespace used to qualify any nested record schema
      produced for a STRUCT/RECORD field.

  Returns:
    Dict[Text, Any]: An equivalent Avro field in dict form.

  Raises:
    AssertionError: If the field has no type or a type with no Avro mapping.
    ValueError: If the field mode is not NULLABLE, REQUIRED or REPEATED.
  """
  assert "type" in table_field, \
    "Unable to get type for table field {}".format(table_field)
  assert table_field["type"] in BIG_QUERY_TO_AVRO_TYPES, \
    "Unable to map BigQuery field type {} to avro type".format(
        table_field["type"])
  avro_type = BIG_QUERY_TO_AVRO_TYPES[table_field["type"]]
  if avro_type == "record":
    # Nested STRUCT/RECORD: recurse, extending the namespace with this
    # field's name so nested record names stay unique.
    element_type = get_record_schema_from_dict_table_schema(
        table_field["name"],
        table_field,
        namespace=".".join((namespace, table_field["name"])))
  else:
    element_type = avro_type
  # BigQuery treats a missing mode as NULLABLE.
  field_mode = table_field.get("mode", "NULLABLE")
  if field_mode in (None, "NULLABLE"):
    field_type = ["null", element_type]
  elif field_mode == "REQUIRED":
    field_type = element_type
  elif field_mode == "REPEATED":
    field_type = {"type": "array", "items": element_type}
  else:
    # BUG FIX: corrected the "Unkown" typo in the error message.
    raise ValueError("Unknown BigQuery field mode: {}".format(field_mode))
  avro_field = {"type": field_type, "name": table_field["name"]}
  doc = table_field.get("description")
  if doc:
    avro_field["doc"] = doc
  return avro_field
| 31.631206 | 180 | 0.679596 |
from __future__ import absolute_import
from __future__ import division
{
"STRUCT": "record",
"RECORD": "record",
"STRING": "string",
"BOOL": "boolean",
"BOOLEAN": "boolean",
"BYTES": "bytes",
"FLOAT64": "double",
"FLOAT": "double",
"INT64": "long",
"INTEGER": "long",
"TIME": {
"type": "long",
"logicalType": "time-micros",
},
"TIMESTAMP": {
"type": "long",
"logicalType": "timestamp-micros",
},
"DATE": {
"type": "int",
"logicalType": "date",
},
"DATETIME": "string",
"NUMERIC": {
"type": "bytes",
"logicalType": "decimal",
cision": 38,
"scale": 9,
},
"GEOGRAPHY": "string",
}
def get_record_schema_from_dict_table_schema(
schema_name, table_schema, namespace="apache_beam.io.gcp.bigquery"):
avro_fields = [
table_field_to_avro_field(field, ".".join((namespace, schema_name)))
for field in table_schema["fields"]
]
return {
"type": "record",
"name": schema_name,
"fields": avro_fields,
"doc": "Translated Avro Schema for {}".format(schema_name),
"namespace": namespace,
}
def table_field_to_avro_field(table_field, namespace):
assert "type" in table_field, \
"Unable to get type for table field {}".format(table_field)
assert table_field["type"] in BIG_QUERY_TO_AVRO_TYPES, \
"Unable to map BigQuery field type {} to avro type".format(
table_field["type"])
avro_type = BIG_QUERY_TO_AVRO_TYPES[table_field["type"]]
if avro_type == "record":
element_type = get_record_schema_from_dict_table_schema(
table_field["name"],
table_field,
namespace=".".join((namespace, table_field["name"])))
else:
element_type = avro_type
field_mode = table_field.get("mode", "NULLABLE")
if field_mode in (None, "NULLABLE"):
field_type = ["null", element_type]
elif field_mode == "REQUIRED":
field_type = element_type
elif field_mode == "REPEATED":
field_type = {"type": "array", "items": element_type}
else:
raise ValueError("Unkown BigQuery field mode: {}".format(field_mode))
avro_field = {"type": field_type, "name": table_field["name"]}
doc = table_field.get("description")
if doc:
avro_field["doc"] = doc
return avro_field
| true | true |
f726b1967d8348b9c13635036359c128ffc392c3 | 750 | py | Python | src/language/Perl.py | fearless-spider/repo_info_extractor | fd9301d9ea637df19dcc015e70c300e2eea54a45 | [
"MIT"
] | 2 | 2019-11-27T15:21:42.000Z | 2020-12-12T15:17:42.000Z | src/language/Perl.py | fearless-spider/repo_info_extractor | fd9301d9ea637df19dcc015e70c300e2eea54a45 | [
"MIT"
] | null | null | null | src/language/Perl.py | fearless-spider/repo_info_extractor | fd9301d9ea637df19dcc015e70c300e2eea54a45 | [
"MIT"
] | null | null | null | import re
def extract_libraries(files):
    """Collect the Perl modules pulled in via ``use``/``require`` in *files*.

    Parameters
    ----------
    files : []string
        Full paths to files that need to be analysed

    Returns
    -------
    dict
        imports that were used in the provided files, mapped against the language
    """
    # Captures the module name of a use/require statement that is not commented out.
    pattern = re.compile(r"(?:[^#]\s+)(?:use|require)[^\S\n]+(?:if.*,\s+)?[\"']?([a-zA-Z][a-zA-Z0-9:]*)[\"']?(?:\s+.*)?;")
    found = []
    for path in files:
        with open(file=path, mode='r', errors='ignore') as handle:
            source = ' '.join(handle.readlines())
        found.extend(pattern.findall(source))
    return {"Perl": found}
| 24.193548 | 120 | 0.536 | import re
def extract_libraries(files):
res = []
regex = re.compile(r"(?:[^#]\s+)(?:use|require)[^\S\n]+(?:if.*,\s+)?[\"']?([a-zA-Z][a-zA-Z0-9:]*)[\"']?(?:\s+.*)?;")
for f in files:
with open(file=f, mode='r', errors='ignore') as fr:
contents = ' '.join(fr.readlines())
matches = regex.findall(contents)
if matches:
res.extend(matches)
return {"Perl": res}
| true | true |
f726b22a3876f904a6f1d950541f05c40c664edb | 4,607 | py | Python | python_gyg/tests/test_location.py | fukac99/python_gyg | 2722da1b2a858336fff584af5acc3e78135ab8a1 | [
"MIT"
] | 1 | 2019-05-22T19:37:16.000Z | 2019-05-22T19:37:16.000Z | python_gyg/tests/test_location.py | fukac99/python_gyg | 2722da1b2a858336fff584af5acc3e78135ab8a1 | [
"MIT"
] | null | null | null | python_gyg/tests/test_location.py | fukac99/python_gyg | 2722da1b2a858336fff584af5acc3e78135ab8a1 | [
"MIT"
] | null | null | null | from unittest import TestCase
import python_gyg
import datetime
GYG_API_KEY = "<your_api_key>"
class TestLocation(TestCase):
    """Location lookup tests.

    NOTE(review): these are live integration tests — they call the real
    GetYourGuide API and need a valid key in ``GYG_API_KEY`` (currently a
    placeholder).
    """

    def test_is_GetYourGuide_isntance(self):
        # Sanity check: the constructor returns a GetYourGuide instance.
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s, python_gyg.GetYourGuide))

    # def test_get_location_param_error(self):
    #     s = python_gyg.GetYourGuide(GYG_API_KEY)
    #     self.assertRaises(python_gyg.RequiredParameterError, s.get_location())
    #
    # def test_get_location_gyg_error(self):
    #     s = python_gyg.GetYourGuide(GYG_API_KEY)
    #     self.assertRaises(python_gyg.GetYourGuideError, s.get_location(10000000))

    def test_get_location_newyork(self):
        # Location id 59 is expected to resolve to "New York".
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue((s.get_location(59))["data"]["locations"][0]["name"] == "New York")
class TestTour(TestCase):
    """Single-tour lookup tests against the live GetYourGuide API
    (requires a real key in ``GYG_API_KEY``)."""

    # def test_get_tour_param_error(self):
    #     s = python_gyg.GetYourGuide(GYG_API_KEY)
    #     self.assertRaises(python_gyg.RequiredParameterError, s.get_tour())
    #
    # def test_get_tour_gyg_error(self):
    #     s = python_gyg.GetYourGuide(GYG_API_KEY)
    #     self.assertRaises(python_gyg.GetYourGuideError, s.get_tour(10000000))

    def test_get_tour_newyork(self):
        # Tour id 20434 is expected to be the night photography experience.
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue((s.get_tour(20434))["data"]["tours"][0]["title"] == "New York Photography Experience by Night")
class TestSearchTours(TestCase):
    """Tour-search tests exercising each supported ``search_tours`` filter.

    NOTE(review): live API tests; they only assert a dict is returned,
    not the payload contents.
    """

    # def test_search_tour_param_error(self):
    #     s = python_gyg.GetYourGuide(GYG_API_KEY)
    #     self.assertRaises(python_gyg.RequiredParameterError, s.search_tours())
    #
    # def test_search_tour_bad_param_error(self):
    #     s = python_gyg.GetYourGuide(GYG_API_KEY)
    #     self.assertRaises(python_gyg.BadParameterError, s.search_tours(q="New York", date=332))
    #
    # def test_search_tour_bad_param_error2(self):
    #     s = python_gyg.GetYourGuide(GYG_API_KEY)
    #     self.assertRaises(python_gyg.BadParameterError, s.search_tours(q="New York", date=[42, 42]))
    #
    def test_search_tour_one_date(self):
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_tours(q="New York", date=datetime.datetime.now()), dict))

    def test_search_tour_two_dates(self):
        # A two-element list is accepted as a [start, end] date range.
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_tours(q="New York",
                                                  date=[datetime.datetime.now(),
                                                        datetime.datetime.now()]),
                                   dict))

    def test_search_tour_by_coordinates(self):
        # presumably [latitude, longitude, radius] — verify against python_gyg docs.
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10]), dict))

    def test_search_tour_by_categories(self):
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_tours(categories=[1,2]), dict))

    def test_search_tour_by_location(self):
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_tours(location=[59, 109]), dict))

    def test_search_tour_by_one_price(self):
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10], price=30), dict))

    def test_search_tour_by_price_range(self):
        # A two-element list is accepted as a [min, max] price range.
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10], price=[30, 60]), dict))

    def test_search_tour_by_max_duration(self):
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10], duration=120), dict))

    def test_search_tour_by_duration_range(self):
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10], duration=[30, 260]), dict))
class TestSearchLocations(TestCase):
    """Location-search tests covering each supported ``search_locations``
    filter (live API; requires a real key in ``GYG_API_KEY``)."""

    def test_search_locations_by_coordinates(self):
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_locations(coordinates=[40.75, -73.97, 10]), dict))

    def test_search_locations_by_query(self):
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_locations(q="New York"), dict))

    def test_search_locations_by_location(self):
        s = python_gyg.GetYourGuide(GYG_API_KEY)
        self.assertTrue(isinstance(s.search_locations(location=59), dict))
| 45.166667 | 119 | 0.682223 | from unittest import TestCase
import python_gyg
import datetime
GYG_API_KEY = "<your_api_key>"
class TestLocation(TestCase):
def test_is_GetYourGuide_isntance(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s, python_gyg.GetYourGuide))
def test_get_location_newyork(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue((s.get_location(59))["data"]["locations"][0]["name"] == "New York")
class TestTour(TestCase):
def test_get_tour_newyork(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue((s.get_tour(20434))["data"]["tours"][0]["title"] == "New York Photography Experience by Night")
class TestSearchTours(TestCase):
def test_search_tour_one_date(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_tours(q="New York", date=datetime.datetime.now()), dict))
def test_search_tour_two_dates(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_tours(q="New York",
date=[datetime.datetime.now(),
datetime.datetime.now()]),
dict))
def test_search_tour_by_coordinates(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10]), dict))
def test_search_tour_by_categories(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_tours(categories=[1,2]), dict))
def test_search_tour_by_location(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_tours(location=[59, 109]), dict))
def test_search_tour_by_one_price(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10], price=30), dict))
def test_search_tour_by_price_range(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10], price=[30, 60]), dict))
def test_search_tour_by_max_duration(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10], duration=120), dict))
def test_search_tour_by_duration_range(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_tours(coordinates=[40.75, -73.97, 10], duration=[30, 260]), dict))
class TestSearchLocations(TestCase):
def test_search_locations_by_coordinates(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_locations(coordinates=[40.75, -73.97, 10]), dict))
def test_search_locations_by_query(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_locations(q="New York"), dict))
def test_search_locations_by_location(self):
s = python_gyg.GetYourGuide(GYG_API_KEY)
self.assertTrue(isinstance(s.search_locations(location=59), dict))
| true | true |
f726b366c9cf2b7cd4cfde6038b4f205fcd52e43 | 1,036 | py | Python | vyperlogix/zlib/zlibCompressor.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | 1 | 2020-09-29T01:36:33.000Z | 2020-09-29T01:36:33.000Z | vyperlogix/zlib/zlibCompressor.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | null | null | null | vyperlogix/zlib/zlibCompressor.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | null | null | null | import gzip, zlib, base64
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__copyright__ = """\
(c). Copyright 2008-2020, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def decompress_zlib(s):
    """Inverse of :func:`zlib_compress`: base64-decode *s* and zlib-inflate it.

    Args:
        s (bytes): base64 text (newlines allowed) produced by ``zlib_compress``.

    Returns:
        bytes: the original uncompressed payload.
    """
    # BUG FIX: base64.decodestring() was removed in Python 3.9;
    # decodebytes() (available since 3.1) is the drop-in replacement.
    return zlib.decompress(base64.decodebytes(s), 15)


def zlib_compress(s):
    """Compress *s* with zlib at maximum level and base64-encode the result.

    Args:
        s (bytes): raw payload to compress.

    Returns:
        bytes: MIME-style base64 text (line-wrapped, trailing newline),
        round-trippable through :func:`decompress_zlib`.
    """
    # encodebytes() replaces the removed encodestring() and produces the
    # same line-wrapped output.
    return base64.encodebytes(zlib.compress(s, 9))
| 31.393939 | 70 | 0.779923 | import gzip, zlib, base64
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__copyright__ = """\
(c). Copyright 2008-2020, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def decompress_zlib(s):
return zlib.decompress(base64.decodestring(s), 15)
def zlib_compress(s):
return base64.encodestring(zlib.compress(s, 9))
| true | true |
f726b54f3a46ebdee086c417557534ca46be6aee | 1,875 | py | Python | examples/pacman/independent.py | okkhoy/rlpy | af25d2011fff1d61cb7c5cc8992549808f0c6103 | [
"BSD-3-Clause"
] | 265 | 2015-01-21T08:11:12.000Z | 2021-12-21T08:06:21.000Z | examples/pacman/independent.py | okkhoy/rlpy | af25d2011fff1d61cb7c5cc8992549808f0c6103 | [
"BSD-3-Clause"
] | 22 | 2015-03-26T17:41:43.000Z | 2019-12-19T08:47:36.000Z | examples/pacman/independent.py | okkhoy/rlpy | af25d2011fff1d61cb7c5cc8992549808f0c6103 | [
"BSD-3-Clause"
] | 85 | 2015-02-18T00:25:15.000Z | 2021-11-15T11:10:00.000Z | """
Cart-pole balancing with independent discretization
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from rlpy.Domains import Pacman
from rlpy.Agents import Q_Learning
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
# Hyperopt search space for tuning this experiment's hyperparameters
# (consumed by external tuning scripts, not by make_experiment itself).
param_space = {'discretization': hp.quniform("discretization", 3, 50, 1),
               'lambda_': hp.uniform("lambda_", 0., 1.),
               'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
               'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        lambda_=0.9,
        boyan_N0=22.36,
        initial_learn_rate=.068,
        discretization=9):
    """Build a Pacman Q-learning experiment with tabular representation.

    Args:
        exp_id: Identifier (and seed) of the experiment run.
        path: Output directory template for results.
        lambda_: Eligibility-trace decay passed to the Q-learning agent.
        boyan_N0: N0 parameter of the Boyan learn-rate decay schedule.
        initial_learn_rate: Initial learning rate of the agent.
        discretization: Number of bins per continuous state dimension.

    Returns:
        Experiment: a configured, ready-to-run rlpy Experiment.
    """
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 150000
    opt["num_policy_checks"] = 30
    opt["checks_per_policy"] = 1
    domain = Pacman()
    opt["domain"] = domain
    representation = IncrementalTabular(
        domain,
        discretization=discretization)
    policy = eGreedy(representation, epsilon=0.1)
    # BUG FIX: the lambda_ argument was previously ignored — a literal 0.9
    # was passed to Q_Learning. Forward the caller-supplied value instead.
    opt["agent"] = Q_Learning(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    experiment = Experiment(**opt)
    return experiment
if __name__ == '__main__':
    #from Tools.run import run_profiled
    # run_profiled(make_experiment)
    # Run a single experiment interactively with step-by-step visualization.
    experiment = make_experiment(1)
    experiment.run(visualize_steps=True)
    experiment.plot()
    # experiment.save()
| 32.894737 | 98 | 0.700267 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from rlpy.Domains import Pacman
from rlpy.Agents import Q_Learning
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
param_space = {'discretization': hp.quniform("discretization", 3, 50, 1),
'lambda_': hp.uniform("lambda_", 0., 1.),
'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
lambda_=0.9,
boyan_N0=22.36,
initial_learn_rate=.068,
discretization=9):
opt = {}
opt["path"] = path
opt["exp_id"] = exp_id
opt["max_steps"] = 150000
opt["num_policy_checks"] = 30
opt["checks_per_policy"] = 1
domain = Pacman()
opt["domain"] = domain
representation = IncrementalTabular(
domain,
discretization=discretization)
policy = eGreedy(representation, epsilon=0.1)
opt["agent"] = Q_Learning(
policy, representation, discount_factor=domain.discount_factor,
lambda_=0.9, initial_learn_rate=initial_learn_rate,
learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
experiment = Experiment(**opt)
return experiment
if __name__ == '__main__':
experiment = make_experiment(1)
experiment.run(visualize_steps=True)
experiment.plot()
| true | true |
f726b5f40791ce2171ac294bd9cf1073746baf44 | 2,896 | py | Python | saleor/checkout/views/discount.py | dedhio/bellastore | 03cad4d11c039c6c33291021def812570c09fe36 | [
"BSD-3-Clause"
] | 3 | 2019-06-09T18:00:54.000Z | 2019-06-18T10:07:39.000Z | saleor/checkout/views/discount.py | dedhio/bellastore | 03cad4d11c039c6c33291021def812570c09fe36 | [
"BSD-3-Clause"
] | 2 | 2019-07-03T21:08:32.000Z | 2019-08-06T02:09:26.000Z | saleor/checkout/views/discount.py | dedhio/bellastore | 03cad4d11c039c6c33291021def812570c09fe36 | [
"BSD-3-Clause"
] | 1 | 2021-04-03T10:47:36.000Z | 2021-04-03T10:47:36.000Z | from datetime import date
from functools import wraps
from django.contrib import messages
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.translation import pgettext
from django.views.decorators.http import require_POST
from ...discount.models import Voucher
from ..forms import CheckoutVoucherForm
from ..models import Checkout
from ..utils import (
get_or_empty_db_checkout,
get_taxes_for_checkout,
recalculate_checkout_discount,
remove_voucher_from_checkout,
)
def add_voucher_form(view):
    """Decorate a view injecting a voucher form and handling its submission."""

    @wraps(view)
    def func(request, checkout):
        prefix = "discount"
        # Only pick POST keys from the voucher form's own prefix so other
        # forms submitted on the same page do not trigger voucher validation.
        data = {k: v for k, v in request.POST.items() if k.startswith(prefix)}
        voucher_form = CheckoutVoucherForm(
            data or None, prefix=prefix, instance=checkout
        )
        if voucher_form.is_bound:
            if voucher_form.is_valid():
                voucher_form.save()
                next_url = request.GET.get("next", request.META["HTTP_REFERER"])
                return redirect(next_url)
            else:
                # Invalid code: drop any previously applied voucher.
                remove_voucher_from_checkout(checkout)
                # if only discount form was used we clear post for other forms
                request.POST = {}
        else:
            # No voucher submission: keep the discount in sync with the
            # current checkout contents and taxes.
            taxes = get_taxes_for_checkout(checkout, request.taxes)
            recalculate_checkout_discount(checkout, request.discounts, taxes)
        response = view(request, checkout)
        # Expose the (possibly bound) form to the template for re-rendering.
        if isinstance(response, TemplateResponse):
            response.context_data["voucher_form"] = voucher_form
        return response

    return func
def validate_voucher(view):
    """Decorate a view making it check whether a discount voucher is valid.

    If the voucher is invalid it will be removed and the user will be
    redirected to the checkout summary view.
    """

    @wraps(view)
    def func(request, checkout):
        if checkout.voucher_code:
            try:
                # Look the code up among vouchers active today; anything
                # expired or deleted raises DoesNotExist below.
                Voucher.objects.active(date=date.today()).get(
                    code=checkout.voucher_code
                )
            except Voucher.DoesNotExist:
                remove_voucher_from_checkout(checkout)
                msg = pgettext(
                    "Checkout warning",
                    "This voucher has expired. Please review your checkout.",
                )
                messages.warning(request, msg)
                return redirect("checkout:summary")
        return view(request, checkout)

    return func
@require_POST
@get_or_empty_db_checkout(Checkout.objects.for_display())
def remove_voucher_view(request, checkout):
    """Drop any applied voucher from the checkout, then redirect back."""
    return_to = request.GET.get("next", request.META["HTTP_REFERER"])
    remove_voucher_from_checkout(checkout)
    return redirect(return_to)
| 34.070588 | 80 | 0.658494 | from datetime import date
from functools import wraps
from django.contrib import messages
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.translation import pgettext
from django.views.decorators.http import require_POST
from ...discount.models import Voucher
from ..forms import CheckoutVoucherForm
from ..models import Checkout
from ..utils import (
get_or_empty_db_checkout,
get_taxes_for_checkout,
recalculate_checkout_discount,
remove_voucher_from_checkout,
)
def add_voucher_form(view):
@wraps(view)
def func(request, checkout):
prefix = "discount"
data = {k: v for k, v in request.POST.items() if k.startswith(prefix)}
voucher_form = CheckoutVoucherForm(
data or None, prefix=prefix, instance=checkout
)
if voucher_form.is_bound:
if voucher_form.is_valid():
voucher_form.save()
next_url = request.GET.get("next", request.META["HTTP_REFERER"])
return redirect(next_url)
else:
remove_voucher_from_checkout(checkout)
request.POST = {}
else:
taxes = get_taxes_for_checkout(checkout, request.taxes)
recalculate_checkout_discount(checkout, request.discounts, taxes)
response = view(request, checkout)
if isinstance(response, TemplateResponse):
response.context_data["voucher_form"] = voucher_form
return response
return func
def validate_voucher(view):
@wraps(view)
def func(request, checkout):
if checkout.voucher_code:
try:
Voucher.objects.active(date=date.today()).get(
code=checkout.voucher_code
)
except Voucher.DoesNotExist:
remove_voucher_from_checkout(checkout)
msg = pgettext(
"Checkout warning",
"This voucher has expired. Please review your checkout.",
)
messages.warning(request, msg)
return redirect("checkout:summary")
return view(request, checkout)
return func
@require_POST
@get_or_empty_db_checkout(Checkout.objects.for_display())
def remove_voucher_view(request, checkout):
next_url = request.GET.get("next", request.META["HTTP_REFERER"])
remove_voucher_from_checkout(checkout)
return redirect(next_url)
| true | true |
f726b60b2e30efcf835322d9c0d038acf405f3ab | 895 | py | Python | 086.py | zlsun/ProjectEuler | 813ec545484924a052f1bd7fd90a4c676eea3bba | [
"MIT"
] | null | null | null | 086.py | zlsun/ProjectEuler | 813ec545484924a052f1bd7fd90a4c676eea3bba | [
"MIT"
] | null | null | null | 086.py | zlsun/ProjectEuler | 813ec545484924a052f1bd7fd90a4c676eea3bba | [
"MIT"
] | null | null | null | #-*- encoding: utf-8 -*-
"""
Cuboid route
A spider, S, sits in one corner of a cuboid room, measuring 6 by 5 by 3, and a fly, F, sits in the opposite corner. By travelling on the surfaces of the room the shortest "straight line" distance from S to F is 10 and the path is shown on the diagram.
However, there are up to three "shortest" path candidates for any given cuboid and the shortest route doesn't always have integer length.
It can be shown that there are exactly 2060 distinct cuboids, ignoring rotations, with integer dimensions, up to a maximum size of M by M by M, for which the shortest route has integer length when M = 100. This is the least value of M for which the number of solutions first exceeds two thousand; the number of solutions when M = 99 is 1975.
Find the least value of M such that the number of solutions first exceeds one million.
"""
from utils import *
#
| 49.722222 | 341 | 0.75419 |
from utils import *
| true | true |
f726b71c0d372ca68b0e214f1e0ae937fead58bb | 642 | py | Python | url_migration/management/commands/remove_expired_redirects.py | riklaunim/django-url-migration | 0d1115d02b64a895934ecdd7387e65b34b3d68e7 | [
"BSD-3-Clause"
] | 4 | 2017-04-28T18:58:31.000Z | 2017-10-04T07:32:47.000Z | url_migration/management/commands/remove_expired_redirects.py | riklaunim/django-url-migration | 0d1115d02b64a895934ecdd7387e65b34b3d68e7 | [
"BSD-3-Clause"
] | 3 | 2021-04-23T11:30:49.000Z | 2021-04-26T14:12:29.000Z | url_migration/management/commands/remove_expired_redirects.py | riklaunim/django-url-migration | 0d1115d02b64a895934ecdd7387e65b34b3d68e7 | [
"BSD-3-Clause"
] | 1 | 2021-04-23T11:07:36.000Z | 2021-04-23T11:07:36.000Z | from django.core.management.base import BaseCommand
from django.utils import timezone
from url_migration import models
class Command(BaseCommand):
    """Delete URL mapping rules whose expiry window has elapsed since last use."""

    def handle(self, **options):
        querysets = (
            models.UrlRegexpMapping.objects.filter(last_usage__isnull=False),
            models.UrlMapping.objects.filter(last_usage__isnull=False),
        )
        for queryset in querysets:
            for mapping in queryset:
                self._remove_if_unused(mapping)

    def _remove_if_unused(self, mapping):
        # A rule expires expire_after after its last recorded usage.
        expiry = mapping.last_usage.used_date + mapping.expire_after
        if expiry < timezone.now():
            self.stdout.write('Removing expired rule %s' % str(mapping))
            mapping.delete()
| 35.666667 | 85 | 0.71028 | from django.core.management.base import BaseCommand
from django.utils import timezone
from url_migration import models
class Command(BaseCommand):
def handle(self, **options):
for rule in models.UrlRegexpMapping.objects.filter(last_usage__isnull=False):
self._remove_if_unused(rule)
for rule in models.UrlMapping.objects.filter(last_usage__isnull=False):
self._remove_if_unused(rule)
def _remove_if_unused(self, rule):
if rule.last_usage.used_date + rule.expire_after < timezone.now():
self.stdout.write('Removing expired rule %s' % str(rule))
rule.delete()
| true | true |
f726b73d345c483e69c29ca4afc5bfc2e99d7b7f | 4,201 | py | Python | fellowship/contract_generator.py | nokia/contract-test-framework | 67976b3361b1bb28639059720d247987ff203224 | [
"BSD-3-Clause"
] | 2 | 2021-10-05T06:47:07.000Z | 2022-03-03T23:34:50.000Z | fellowship/contract_generator.py | nokia/contract-test-framework | 67976b3361b1bb28639059720d247987ff203224 | [
"BSD-3-Clause"
] | 10 | 2021-09-02T06:58:55.000Z | 2021-12-03T19:21:39.000Z | fellowship/contract_generator.py | nokia/contract-test-framework | 67976b3361b1bb28639059720d247987ff203224 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2021 Nokia
# Licensed under the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
import json
import logging
import os
from urllib.parse import urlparse
from genson import SchemaBuilder
from .contract_renderer import ContractRenderer
from .strict_schema_builder import StrictSchemaBuilder
LOGGER = logging.getLogger(__name__)
class ContractGenerator:
"""Generates a json contract based on a request and its expected output.
Attributes:
contract_path (str): The path of file that will be generated
type_of_contract (str): The type to write to contract in field contract_type
can be either rest or grpc. Default value: rest
"""
    def __init__(self, output_path: str, type_of_contract: str = "rest") -> None:
        # Path the generated contract json will be written to.
        self.contract_path = output_path
        # Written into the contract's contract_type field ("rest" or "grpc").
        self.type_of_contract = type_of_contract
        # Renders/validates contracts relative to the output directory.
        self.contract_renderer = ContractRenderer(os.path.dirname(output_path))
        # Default builder validates field types only; see set_type_of_schema_builder.
        self.schema_builder = SchemaBuilder()
def set_type_of_schema_builder(self, type_of_schema_builder: str = "type") -> None:
"""Sets the type of schema builder can be either type or strict
Type builder is the default and generates the contract with type validation. The
Other type of builder supported is strict which will generate contract with
expected value for each field.
Args:
type_of_schema_builder (str): The type of builder to use when generating
schema can be either type or strict. Default value: type
"""
if type_of_schema_builder == "strict":
self.schema_builder = StrictSchemaBuilder()
else:
self.schema_builder = SchemaBuilder()
def generate_and_save_contract(self, request_kwargs: dict, expected_json: dict) \
-> None:
""" Function that generates a new contract and saves it to specified location
Args:
request_kwargs (dict): A dictionary that describes the request that should
return the expected_json. The dictionary needs to contain url (at least
endpoint) and method. Optional parameters include headers and data
expected_json (dict): Is the Json response that is expected when the request
from the request_kwargs dictionary is sent.
"""
contract_json = self._generate_contract(request_kwargs, expected_json)
self._save_contract(contract_json)
def _generate_contract(self, request_kwargs, expected_json):
if self.type_of_contract.lower() == "rest":
self._check_request_kwargs(request_kwargs)
self.schema_builder.add_schema({'contract_type': self.type_of_contract,
'request': {**request_kwargs}})
self.schema_builder.add_object(expected_json)
LOGGER.info("The generated schema: %s \nSaved to file: %s",
self.schema_builder.to_json(indent=4), self.contract_path)
return self.schema_builder.to_schema()
def _save_contract(self, contract_json):
with open(self.contract_path, 'w', encoding="UTF-8") as contract_file:
contract_file.write(json.dumps(contract_json,
indent=4))
self.contract_renderer.render_and_validate_contract(
os.path.basename(self.contract_path)
)
def _check_request_kwargs(self, request_kwargs):
if 'headers' not in request_kwargs:
self._add_default_headers(request_kwargs)
self._add_protocol_and_host(request_kwargs)
@staticmethod
def _add_default_headers(request_kwargs):
request_kwargs['headers'] = "{{ config.default_headers }}"
@staticmethod
def _add_protocol_and_host(request_kwargs):
uri = urlparse(request_kwargs['url'])
if not uri.netloc:
request_kwargs['url'] = "{{ config.host }}" + request_kwargs['url']
if not uri.scheme:
request_kwargs['url'] = "{{ config.protocol }}://" + request_kwargs['url']
return request_kwargs
| 42.434343 | 89 | 0.660081 |
import json
import logging
import os
from urllib.parse import urlparse
from genson import SchemaBuilder
from .contract_renderer import ContractRenderer
from .strict_schema_builder import StrictSchemaBuilder
LOGGER = logging.getLogger(__name__)
class ContractGenerator:
def __init__(self, output_path: str, type_of_contract: str = "rest") -> None:
self.contract_path = output_path
self.type_of_contract = type_of_contract
self.contract_renderer = ContractRenderer(os.path.dirname(output_path))
self.schema_builder = SchemaBuilder()
def set_type_of_schema_builder(self, type_of_schema_builder: str = "type") -> None:
if type_of_schema_builder == "strict":
self.schema_builder = StrictSchemaBuilder()
else:
self.schema_builder = SchemaBuilder()
def generate_and_save_contract(self, request_kwargs: dict, expected_json: dict) \
-> None:
contract_json = self._generate_contract(request_kwargs, expected_json)
self._save_contract(contract_json)
def _generate_contract(self, request_kwargs, expected_json):
if self.type_of_contract.lower() == "rest":
self._check_request_kwargs(request_kwargs)
self.schema_builder.add_schema({'contract_type': self.type_of_contract,
'request': {**request_kwargs}})
self.schema_builder.add_object(expected_json)
LOGGER.info("The generated schema: %s \nSaved to file: %s",
self.schema_builder.to_json(indent=4), self.contract_path)
return self.schema_builder.to_schema()
def _save_contract(self, contract_json):
with open(self.contract_path, 'w', encoding="UTF-8") as contract_file:
contract_file.write(json.dumps(contract_json,
indent=4))
self.contract_renderer.render_and_validate_contract(
os.path.basename(self.contract_path)
)
def _check_request_kwargs(self, request_kwargs):
if 'headers' not in request_kwargs:
self._add_default_headers(request_kwargs)
self._add_protocol_and_host(request_kwargs)
@staticmethod
def _add_default_headers(request_kwargs):
request_kwargs['headers'] = "{{ config.default_headers }}"
@staticmethod
def _add_protocol_and_host(request_kwargs):
uri = urlparse(request_kwargs['url'])
if not uri.netloc:
request_kwargs['url'] = "{{ config.host }}" + request_kwargs['url']
if not uri.scheme:
request_kwargs['url'] = "{{ config.protocol }}://" + request_kwargs['url']
return request_kwargs
| true | true |
f726b7dadf4efb4ce13b43f57fed824b080e01f6 | 387 | py | Python | nifty/wsgi.py | waynekyamamoto/jakobia | 04b82f620267f500d7b19937ef2631c6a840c42a | [
"Apache-2.0"
] | null | null | null | nifty/wsgi.py | waynekyamamoto/jakobia | 04b82f620267f500d7b19937ef2631c6a840c42a | [
"Apache-2.0"
] | null | null | null | nifty/wsgi.py | waynekyamamoto/jakobia | 04b82f620267f500d7b19937ef2631c6a840c42a | [
"Apache-2.0"
] | null | null | null | """
WSGI config for nifty project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nifty.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| 22.764706 | 78 | 0.782946 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nifty.settings')
application = get_wsgi_application()
| true | true |
f726b86af605e3f6febf6ed5b3e113bf41b88604 | 1,158 | py | Python | tests/metrics/test_statsd_metrics.py | bear8421/thumbor | 00a0c44d44b8fa5f06c38deee7123793addda404 | [
"MIT"
] | 1 | 2021-12-24T02:01:52.000Z | 2021-12-24T02:01:52.000Z | tests/metrics/test_statsd_metrics.py | bear8421/thumbor | 00a0c44d44b8fa5f06c38deee7123793addda404 | [
"MIT"
] | 2 | 2022-03-10T22:11:18.000Z | 2022-03-16T22:42:04.000Z | tests/metrics/test_statsd_metrics.py | bear8421/thumbor | 00a0c44d44b8fa5f06c38deee7123793addda404 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from preggy import expect
import thumbor.metrics
from tests.base import TestCase
from thumbor.config import Config
from thumbor.context import Context
from thumbor.importer import Importer
class StatsdMetricsTestCase(TestCase):
    """Exercises the statsd metrics backend through a minimal thumbor context."""

    def get_context(self):
        """Build a Context whose importer loads the statsd metrics module."""
        config = Config()
        config.METRICS = "thumbor.metrics.statsd_metrics"
        importer = Importer(config)
        importer.import_modules()
        return Context(None, config, importer)

    def test_should_initialize_metrics(self):
        """The context must expose a statsd Metrics instance."""
        metrics = self.context.metrics
        expect(metrics).to_be_instance_of(thumbor.metrics.statsd_metrics.Metrics)

    def test_should_not_fail_on_use(self):
        """Counter and timing calls must not raise against the statsd backend."""
        metrics = self.context.metrics
        expect(metrics.incr("test.count")).not_to_be_an_error()
        expect(metrics.incr("test.count", 2)).not_to_be_an_error()
        expect(metrics.timing("test.time", 100)).not_to_be_an_error()
| 29.692308 | 79 | 0.707254 |
from preggy import expect
import thumbor.metrics
from tests.base import TestCase
from thumbor.config import Config
from thumbor.context import Context
from thumbor.importer import Importer
class StatsdMetricsTestCase(TestCase):
def get_context(self):
conf = Config()
conf.METRICS = "thumbor.metrics.statsd_metrics"
imp = Importer(conf)
imp.import_modules()
return Context(None, conf, imp)
def test_should_initialize_metrics(self):
expect(self.context.metrics).to_be_instance_of(
thumbor.metrics.statsd_metrics.Metrics
)
def test_should_not_fail_on_use(self):
expect(self.context.metrics.incr("test.count")).not_to_be_an_error()
expect(self.context.metrics.incr("test.count", 2)).not_to_be_an_error()
expect(
self.context.metrics.timing("test.time", 100)
).not_to_be_an_error()
| true | true |
f726b86fa3b46ca58a327fc2e1c4d7a5610f5095 | 1,055 | py | Python | processing/gray-scale-processing.py | rsampaths16/ReRes | 51089c806c57087eb94d9a659036ebed88e96f13 | [
"Apache-2.0"
] | 2 | 2017-12-19T07:50:25.000Z | 2018-03-26T05:59:54.000Z | processing/gray-scale-processing.py | rsampaths16/ReRes | 51089c806c57087eb94d9a659036ebed88e96f13 | [
"Apache-2.0"
] | null | null | null | processing/gray-scale-processing.py | rsampaths16/ReRes | 51089c806c57087eb94d9a659036ebed88e96f13 | [
"Apache-2.0"
] | 1 | 2020-04-26T03:12:35.000Z | 2020-04-26T03:12:35.000Z | import numpy
import scipy
import glob
from matplotlib import pyplot
from scipy import misc
from numpy import random
# Fixed seed so the sampled patch locations are reproducible across runs.
random.seed(0)
# Side length (pixels) of every extracted square patch.
SIZE = 128
# Source images plus the output directories for the paired training data.
ORIGINAL = '../data/offline-data/black-and-white-images/original'
HIGH = '../data/offline-data/black-and-white-images/train/high'
LOW = '../data/offline-data/black-and-white-images/train/low'
def sample_patch(image):
    """Cut one random SIZE x SIZE patch and return its (low, high) res pair.

    The low-res version is produced by downscaling the patch 4x and scaling
    it back up, fabricating a degraded input for super-resolution training.

    Args:
        image: 2-D (grayscale) numpy array at least SIZE pixels per side.
            # assumes grayscale layout — TODO confirm against imread callers

    Returns:
        Tuple ``(low, high)`` of SIZE x SIZE arrays.
    """
    # numpy.random.randint's upper bound is exclusive: +1 allows the patch to
    # start at the last valid offset and avoids ValueError when the image is
    # exactly SIZE pixels on a side.  (dtype=numpy.int dropped: the alias was
    # removed in NumPy 1.24 and the default integer dtype is equivalent.)
    x = random.randint(0, image.shape[0] - SIZE + 1)
    y = random.randint(0, image.shape[1] - SIZE + 1)
    high = numpy.copy(image[x:x+SIZE, y:y+SIZE])
    # Down- then up-scale to produce the blurry counterpart.
    low = misc.imresize(numpy.copy(high), (SIZE // 4, SIZE // 4))
    low = misc.imresize(low, (SIZE, SIZE))
    return low, high
# Sequential id shared by the paired high/low files so pairs match by name.
unique_id = 1
for image_path in glob.glob(ORIGINAL + '/*.jpg'):
    print(image_path)
    # Number of patches to draw from this image (currently one per image).
    sample = 1
    image = misc.imread(image_path)
    while sample > 0:
        low, high = sample_patch(image)
        # Identical file name in both directories links the training pair.
        misc.imsave(HIGH + '/' + str(unique_id) + '.jpg', high)
        misc.imsave(LOW + '/' + str(unique_id) + '.jpg', low)
        sample -= 1
        unique_id += 1
import scipy
import glob
from matplotlib import pyplot
from scipy import misc
from numpy import random
random.seed(0)
SIZE = 128
ORIGINAL = '../data/offline-data/black-and-white-images/original'
HIGH = '../data/offline-data/black-and-white-images/train/high'
LOW = '../data/offline-data/black-and-white-images/train/low'
def sample_patch(image):
x = random.randint(0, image.shape[0] - SIZE, dtype=numpy.int)
y = random.randint(0, image.shape[1] - SIZE, dtype=numpy.int)
high = numpy.copy(image[x:x+SIZE, y:y+SIZE])
low = numpy.copy(high)
low = misc.imresize(low, (SIZE // 4, SIZE // 4))
low = misc.imresize(low, (SIZE, SIZE))
return low, high
unique_id = 1
for image_path in glob.glob(ORIGINAL + '/*.jpg'):
print(image_path)
sample = 1
image = misc.imread(image_path)
while sample > 0:
low, high = sample_patch(image)
misc.imsave(HIGH + '/' + str(unique_id) + '.jpg', high)
misc.imsave(LOW + '/' + str(unique_id) + '.jpg', low)
sample -= 1
unique_id += 1
| true | true |
f726b8d12a6a6783daf22dfc04e130655b135796 | 5,213 | py | Python | training_3DMatch.py | aosheng1996/D3Feat | d005f3811c12764c16d4f5e9a01c6720e7e72392 | [
"MIT"
] | 1 | 2020-05-11T15:49:34.000Z | 2020-05-11T15:49:34.000Z | training_3DMatch.py | aosheng1996/D3Feat | d005f3811c12764c16d4f5e9a01c6720e7e72392 | [
"MIT"
] | null | null | null | training_3DMatch.py | aosheng1996/D3Feat | d005f3811c12764c16d4f5e9a01c6720e7e72392 | [
"MIT"
] | null | null | null | # Common libs
import time
import os
import sys
# Custom libs
from utils.config import Config
from utils.trainer import ModelTrainer
from models.KPFCNN_model import KernelPointFCNN
# Dataset
from datasets.ThreeDMatch import ThreeDMatchDataset
# ----------------------------------------------------------------------------------------------------------------------
#
# Config Class
# \******************/
#
class ThreeDMatchConfig(Config):
    """
    Override the parameters you want to modify for this dataset
    """
    ####################
    # Dataset parameters
    ####################
    # Evaluation-only switch; False means this run trains the network.
    is_test = False
    # Index of the GPU to run on.
    gpu_id = 0
    dataset = '3DMatch'
    # Number of CPU threads for the input pipeline
    input_threads = 8
    #########################
    # Architecture definition
    #########################
    # Ordered list of blocks: strided resnet encoder stages followed by
    # nearest-neighbour upsampling decoder stages (U-Net style FCNN).
    architecture = ['simple',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'resnetb_strided',
                    'resnetb',
                    'nearest_upsample',
                    'unary',
                    'nearest_upsample',
                    'unary',
                    'nearest_upsample',
                    'unary',
                    'nearest_upsample',
                    'unary',
                    'last_unary']
    # KPConv specific parameters
    num_kernel_points = 15
    # Grid size (in the point cloud's units) of the first subsampling level.
    first_subsampling_dl = 0.03
    # Density of neighborhoods for deformable convs (which need bigger radiuses). For normal conv we use KP_extent
    density_parameter = 5.0
    # Influence function of KPConv in ('constant', 'linear', 'gaussian')
    KP_influence = 'linear'
    KP_extent = 1.0
    # Aggregation function of KPConv in ('closest', 'sum')
    convolution_mode = 'sum'
    # Can the network learn modulations in addition to deformations
    modulated = False
    # detector loss weight
    det_loss_weight = 1
    # Offset loss
    # 'permissive' only constrains offsets inside the big radius
    # 'fitting' helps deformed kernels to adapt to the geometry by penalizing distance to input points
    offsets_loss = 'fitting'
    offsets_decay = 0.1
    # Choice of input features
    in_features_dim = 1
    # Batch normalization parameters
    use_batch_norm = True
    batch_norm_momentum = 0.98
    # batch hard loss safe radius
    safe_radius = 0.1
    #####################
    # Training parameters
    #####################
    # Maximal number of epochs
    max_epoch = 200
    # Learning rate management: exponential decay multiplying the lr by
    # 0.1 ** (1/80) after every epoch, i.e. one order of magnitude per 80 epochs.
    learning_rate = 1e-1
    momentum = 0.98
    lr_decays = {i: 0.1 ** (1 / 80) for i in range(1, max_epoch)}
    grad_clip_norm = 100.0
    # Number of batch
    batch_num = 1
    # Number of keypoints
    keypts_num = 64
    # Number of steps per epochs (cannot be None for this dataset)
    epoch_steps = 5000
    # Number of validation examples per epoch
    validation_size = 500
    # Number of epoch between each snapshot
    snapshot_gap = 1
    # Augmentations
    augment_scale_anisotropic = True
    augment_symmetries = [False, False, False]
    augment_rotation = 1
    augment_scale_min = 0.9
    augment_scale_max = 1.1
    augment_noise = 0.005
    augment_occlusion = 'none'
    # Do we need to save convergence
    saving = True
    # None lets the trainer pick a timestamped results directory.
    saving_path = None
# ----------------------------------------------------------------------------------------------------------------------
#
# Main Call
# \***************/
#
if __name__ == '__main__':
    ##########################
    # Initiate the environment
    ##########################
    # Enable/Disable warnings (set level to '0'/'3')
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
    ###########################
    # Load the model parameters
    ###########################
    config = ThreeDMatchConfig()
    ##############
    # Prepare Data
    ##############
    print()
    print('Dataset Preparation')
    print('*******************')
    # Initiate dataset configuration
    dataset = ThreeDMatchDataset(config.input_threads, voxel_size=config.first_subsampling_dl)
    # Create subsampled input clouds
    dl0 = config.first_subsampling_dl
    # dataset.load_subsampled_clouds(dl0)  (kept for reference; dl0 is otherwise unused)
    # Initialize input pipelines
    dataset.init_input_pipeline(config)
    # Test the input pipeline alone with this debug function
    # dataset.check_input_pipeline_timing(config)
    ##############
    # Define Model
    ##############
    print('Creating Model')
    print('**************\n')
    t1 = time.time()
    # Model class
    model = KernelPointFCNN(dataset.flat_inputs, config)
    # Trainer class; pass restore_snap to resume from a snapshot, e.g.
    # trainer = ModelTrainer(model, restore_snap='results/Log_/snapshots/snap-')
    trainer = ModelTrainer(model)
    t2 = time.time()
    print('\n----------------')
    print('Done in {:.1f} s'.format(t2 - t1))
    print('----------------\n')
    ################
    # Start training
    ################
    print('Start Training')
    print('**************\n')
    # Run the full training loop (logs and snapshots per config settings).
    trainer.train(model, dataset)
| 25.0625 | 120 | 0.531939 |
import time
import os
import sys
from utils.config import Config
from utils.trainer import ModelTrainer
from models.KPFCNN_model import KernelPointFCNN
from datasets.ThreeDMatch import ThreeDMatchDataset
class ThreeDMatchConfig(Config):
False
det_loss_weight = 1
offsets_loss = 'fitting'
offsets_decay = 0.1
in_features_dim = 1
use_batch_norm = True
batch_norm_momentum = 0.98
safe_radius = 0.1
rotation = 1
augment_scale_min = 0.9
augment_scale_max = 1.1
augment_noise = 0.005
augment_occlusion = 'none'
saving = True
saving_path = None
if __name__ == '__main__':
| true | true |
f726b9d13411d60ad6b93cfd0a6545aa3baa5701 | 367 | py | Python | tests/test_models.py | inmagik/django-rest-admin | 61c0d1a993ebcf144352e0ee0f916d9e63c1ccf7 | [
"BSD-3-Clause"
] | 15 | 2015-11-13T00:22:11.000Z | 2020-02-04T12:07:05.000Z | tests/test_models.py | inmagik/django-rest-admin | 61c0d1a993ebcf144352e0ee0f916d9e63c1ccf7 | [
"BSD-3-Clause"
] | null | null | null | tests/test_models.py | inmagik/django-rest-admin | 61c0d1a993ebcf144352e0ee0f916d9e63c1ccf7 | [
"BSD-3-Clause"
] | 5 | 2015-11-13T11:23:19.000Z | 2019-08-06T18:43:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-rest-admin
------------
Tests for `django-rest-admin` models module.
"""
from django.test import TestCase
from django_rest_admin import models
class TestDjango_rest_admin(TestCase):
    """Placeholder test case for the django-rest-admin models module."""
    def setUp(self):
        # No fixtures required yet.
        pass
    def test_something(self):
        # TODO: add real assertions against django_rest_admin.models.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
| 14.115385 | 44 | 0.640327 |
from django.test import TestCase
from django_rest_admin import models
class TestDjango_rest_admin(TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
| true | true |
f726b9d6bccaaeb47166b01a9fa17fc6f824bd62 | 3,497 | py | Python | pypureclient/flasharray/FA_2_11/models/maintenance_window_post.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_11/models/maintenance_window_post.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_11/models/maintenance_window_post.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class MaintenanceWindowPost(object):
    """Request body model for creating a maintenance window (swagger-generated).

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute name -> swagger type string, driven by to_dict().
    swagger_types = {
        'timeout': 'int'
    }
    # Attribute name -> key used in the JSON wire representation.
    attribute_map = {
        'timeout': 'timeout'
    }
    required_args = {
    }
    def __init__(
        self,
        timeout=None,  # type: int
    ):
        """
        Keyword args:
            timeout (int): The specified length of time that alerts are suppressed
                during a maintenance window, measured in milliseconds. The
                maintenance window timeout value must be between `60000` (1 minute)
                and `86400000` (24 hours). The value entered is rounded down to the
                nearest minute. The `names` and `timeout` parameters must be set
                together, and the `names` parameter must be set to `environment`.
        """
        if timeout is not None:
            self.timeout = timeout
    def __setattr__(self, key, value):
        # Reject attributes that are not part of the generated model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `MaintenanceWindowPost`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # Unset swagger properties surface as a missing attribute.
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict (recursing into sub-models)."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(MaintenanceWindowPost, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, MaintenanceWindowPost):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 31.223214 | 408 | 0.571061 |
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class MaintenanceWindowPost(object):
swagger_types = {
'timeout': 'int'
}
attribute_map = {
'timeout': 'timeout'
}
required_args = {
}
def __init__(
self,
timeout=None,
):
if timeout is not None:
self.timeout = timeout
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `MaintenanceWindowPost`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MaintenanceWindowPost, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, MaintenanceWindowPost):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f726ba59f261a358f8d57550d94c95d42ecd6359 | 1,276 | py | Python | tests/util.py | ecoal95/saltfs | 4d2596794a70919c2887688d6d116f2f5bb5cf1e | [
"Apache-2.0",
"MIT"
] | 1 | 2021-01-07T18:49:38.000Z | 2021-01-07T18:49:38.000Z | tests/util.py | ecoal95/saltfs | 4d2596794a70919c2887688d6d116f2f5bb5cf1e | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/util.py | ecoal95/saltfs | 4d2596794a70919c2887688d6d116f2f5bb5cf1e | [
"Apache-2.0",
"MIT"
] | null | null | null | import os
RED = 31
GREEN = 32
BLUE = 34
MAGENTA = 35
def color(code, string):
return '\033[' + str(code) + 'm' + string + '\033[0m'
def display_path(path):
return color(MAGENTA, path)
def colon():
return color(BLUE, ':')
EXCLUDE_DIRS = ['.git', '.vagrant']
def project_path():
# One dirname for tests dir, another for project dir
project_dir = os.path.dirname(os.path.dirname(__file__))
common = os.path.commonpath([project_dir, os.getcwd()])
return project_dir.replace(common, '.', 1) # Only replace once
def paths():
    """Yield every file path under the project root, skipping EXCLUDE_DIRS."""
    for root, dirs, files in os.walk(project_path(), topdown=True):
        # Pruning *dirs* in place stops os.walk from descending into them.
        dirs[:] = [d for d in dirs if d not in EXCLUDE_DIRS]
        for filename in files:
            yield os.path.join(root, filename)
class TestResult(object):
    """Marker base class for the outcome of a single check."""
    pass


class Success(TestResult):
    """Outcome of a check that passed."""

    def __init__(self, message):
        self.message = message

    def is_success(self):
        return True

    def is_failure(self):
        return not self.is_success()


class Failure(TestResult):
    """Outcome of a check that failed, with its captured output."""

    def __init__(self, message, output):
        self.message = message
        self.output = output

    def is_success(self):
        return not self.is_failure()

    def is_failure(self):
        return True
| 19.333333 | 67 | 0.630094 | import os
RED = 31
GREEN = 32
BLUE = 34
MAGENTA = 35
def color(code, string):
return '\033[' + str(code) + 'm' + string + '\033[0m'
def display_path(path):
return color(MAGENTA, path)
def colon():
return color(BLUE, ':')
EXCLUDE_DIRS = ['.git', '.vagrant']
def project_path():
project_dir = os.path.dirname(os.path.dirname(__file__))
common = os.path.commonpath([project_dir, os.getcwd()])
return project_dir.replace(common, '.', 1)
def paths():
for root, dirs, files in os.walk(project_path(), topdown=True):
for exclude_dir in EXCLUDE_DIRS:
if exclude_dir in dirs:
dirs.remove(exclude_dir)
for filename in files:
yield os.path.join(root, filename)
class TestResult(object):
pass
class Success(TestResult):
def __init__(self, message):
self.message = message
def is_success(self):
return True
def is_failure(self):
return False
class Failure(TestResult):
def __init__(self, message, output):
self.message = message
self.output = output
def is_success(self):
return False
def is_failure(self):
return True
| true | true |
f726bab48ffce3b6ae7271247b2fa10be660d332 | 488 | py | Python | challenges/insertion/test_insertion.py | glasscharlie/data-structures-and-algorithms | 4546a0606334c6e3156b567d8cc82d39fb183c58 | [
"MIT"
] | null | null | null | challenges/insertion/test_insertion.py | glasscharlie/data-structures-and-algorithms | 4546a0606334c6e3156b567d8cc82d39fb183c58 | [
"MIT"
] | 4 | 2019-12-02T22:28:03.000Z | 2019-12-09T04:17:53.000Z | challenges/insertion/test_insertion.py | glasscharlie/data-structures-and-algorithms | 4546a0606334c6e3156b567d8cc82d39fb183c58 | [
"MIT"
] | null | null | null | from insertion import insertion
def test_unique_values():
lst = [8,4,23,42,16,15]
expected = [4,8,15,16,23,42]
actual = insertion(lst)
assert actual == expected
def test_duplicate_value():
lst = [8,4,23,42,16,15,8,23]
expected = [4,8,8,15,16,23,23,42]
actual = insertion(lst)
assert actual == expected
def test_negative_values():
lst = [8,4,23,-42,16,-15]
expected = [-42,-15,4,8,16,23]
actual = insertion(lst)
assert actual == expected
| 24.4 | 37 | 0.631148 | from insertion import insertion
def test_unique_values():
lst = [8,4,23,42,16,15]
expected = [4,8,15,16,23,42]
actual = insertion(lst)
assert actual == expected
def test_duplicate_value():
lst = [8,4,23,42,16,15,8,23]
expected = [4,8,8,15,16,23,23,42]
actual = insertion(lst)
assert actual == expected
def test_negative_values():
lst = [8,4,23,-42,16,-15]
expected = [-42,-15,4,8,16,23]
actual = insertion(lst)
assert actual == expected
| true | true |
f726bd9b22797e229793a51530000a11ef85bb1c | 3,516 | py | Python | src/dss/server/dss_logger.py | akraino-edge-stack/ta-distributed-state-server | bd5a0a173f1ae9c64782fbf47565cc26ed23b448 | [
"Apache-2.0"
] | null | null | null | src/dss/server/dss_logger.py | akraino-edge-stack/ta-distributed-state-server | bd5a0a173f1ae9c64782fbf47565cc26ed23b448 | [
"Apache-2.0"
] | null | null | null | src/dss/server/dss_logger.py | akraino-edge-stack/ta-distributed-state-server | bd5a0a173f1ae9c64782fbf47565cc26ed23b448 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import logging
import logging.handlers
from dss.api import dss_error
class Logger:
    """Configure the root logger to emit either to the console or to syslog.

    Args:
        dest (str): destination name, one of Logger.dests ('console'/'syslog').
        verbose (bool): include timestamp/level/module detail in the format.
        level (str): threshold name, one of Logger.levels ('debug'..'error').

    Raises:
        dss_error.Error: if the destination or level name is unknown.
    """

    # Mapping from level name to the numeric logging constant.
    # FIX: 'error' previously mapped to logging.error (the *function*), which
    # made str_to_level('error') hand a callable to basicConfig(level=...).
    levels = {'debug': logging.DEBUG,
              'info': logging.INFO,
              'warning': logging.WARNING,
              'error': logging.ERROR}

    DEST_CONSOLE = 1
    DEST_SYSLOG = 2

    # Mapping from destination name to the internal destination id.
    dests = {'console': DEST_CONSOLE,
             'syslog': DEST_SYSLOG}

    def __init__(self, dest, verbose, level):
        self.verbose = verbose
        self.dest = Logger.str_to_dest(dest)
        self.level = Logger.str_to_level(level)
        self.init()

    def init(self):
        """(Re)apply the current destination/verbosity/level to the root logger."""
        args = {}
        if self.level not in Logger.levels.values():
            raise dss_error.Error('Invalid level value, possible values are %s' % str(Logger.levels))
        if self.dest not in Logger.dests.values():
            raise dss_error.Error('Invalid destination value, possible values are %s' % str(Logger.dests))
        if self.verbose:
            if self.dest is Logger.DEST_CONSOLE:
                # Console output carries the timestamp; syslog adds its own.
                args['format'] = '[%(asctime)s %(levelname)7s %(module)s(%(lineno)3s)] %(message)s'
            else:
                args['format'] = '[%(module)s(%(lineno)3s)] %(message)s'
        else:
            args['format'] = '%(message)s'
        if self.dest is Logger.DEST_CONSOLE:
            args['stream'] = sys.stdout
        elif self.dest is Logger.DEST_SYSLOG:
            logging.getLogger('').addHandler(logging.handlers.SysLogHandler(address='/dev/log'))
        args['level'] = self.level
        logging.basicConfig(**args)

    def set_level(self, level):
        """Change the threshold to the named *level* and reconfigure."""
        self.level = Logger.str_to_level(level)
        self.init()

    def set_dest(self, dest):
        """Change the destination to the named *dest* and reconfigure."""
        self.dest = Logger.str_to_dest(dest)
        self.init()

    @staticmethod
    def str_to_level(level):
        """Map a level name to its logging constant; raise on unknown names."""
        try:
            return Logger.levels[level]
        except KeyError as exp:
            raise dss_error.Error('Invalid log level, possible values %s' % str(Logger.levels.keys())) from exp

    @staticmethod
    def str_to_dest(dest):
        """Map a destination name to its id; raise on unknown names."""
        try:
            return Logger.dests[dest]
        except KeyError as exp:
            raise dss_error.Error('Invalid destination, possible values %s' % str(Logger.dests.keys())) from exp

    @staticmethod
    def level_to_str(level):
        """Reverse lookup: logging constant back to its name, or None."""
        # .items() replaces the Python 2-only .iteritems(); '==' replaces the
        # fragile 'is' identity comparison on integers.
        for key, value in Logger.levels.items():
            if value == level:
                return key
        return None

    @staticmethod
    def dest_to_str(dest):
        """Reverse lookup: destination id back to its name, or None."""
        for key, value in Logger.dests.items():
            if value == dest:
                return key
        return None
if __name__ == '__main__':
    # Smoke test.  Logger() expects the *names* — it performs the
    # str_to_dest()/str_to_level() conversion itself; passing the converted
    # ids back in (as the old code did) made str_to_dest(1) raise.
    logger = Logger('console', True, 'debug')
    world = 'world'
    # Lazy %-style args let logging defer formatting until the record is
    # actually emitted; logging.warn() is a deprecated alias of warning().
    logging.error('hello %s!', world)
    logging.warning('hello %s!', world)
    logging.info('hello %s!', world)
    logging.debug('hello %s!', world)
| 30.842105 | 106 | 0.614903 |
import sys
import logging
import logging.handlers
from dss.api import dss_error
class Logger:
levels = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.error}
DEST_CONSOLE = 1
DEST_SYSLOG = 2
dests = {'console': DEST_CONSOLE,
'syslog': DEST_SYSLOG}
def __init__(self, dest, verbose, level):
self.verbose = verbose
self.dest = Logger.str_to_dest(dest)
self.level = Logger.str_to_level(level)
self.init()
def init(self):
args = {}
if self.level not in Logger.levels.values():
raise dss_error.Error('Invalid level value, possible values are %s' % str(Logger.levels))
if self.dest not in Logger.dests.values():
raise dss_error.Error('Invalid destination value, possible values are %s' % str(Logger.dests))
if self.verbose:
if self.dest is Logger.DEST_CONSOLE:
args['format'] = '[%(asctime)s %(levelname)7s %(module)s(%(lineno)3s)] %(message)s'
else:
args['format'] = '[%(module)s(%(lineno)3s)] %(message)s'
else:
args['format'] = '%(message)s'
if self.dest is Logger.DEST_CONSOLE:
args['stream'] = sys.stdout
elif self.dest is Logger.DEST_SYSLOG:
logging.getLogger('').addHandler(logging.handlers.SysLogHandler(address='/dev/log'))
args['level'] = self.level
logging.basicConfig(**args)
def set_level(self, level):
self.level = Logger.str_to_level(level)
self.init()
def set_dest(self, dest):
self.dest = Logger.str_to_dest(dest)
self.init()
@staticmethod
def str_to_level(level):
ret = None
try:
ret = Logger.levels[level]
except KeyError as exp:
raise dss_error.Error('Invalid log level, possible values %s' % str(Logger.levels.keys()))
return ret
@staticmethod
def str_to_dest(dest):
ret = None
try:
ret = Logger.dests[dest]
except KeyError as exp:
raise dss_error.Error('Invalid destination, possible values %s' % str(Logger.dests.keys()))
return ret
@staticmethod
def level_to_str(level):
for key, value in Logger.levels.iteritems():
if value is level:
return key
return None
@staticmethod
def dest_to_str(dest):
for key, value in Logger.dests.iteritems():
if value is dest:
return key
return None
if __name__ == '__main__':
dest = Logger.str_to_dest('console')
level = Logger.str_to_level('debug')
logger = Logger(dest, True, level)
world='world'
logging.error('hello %s!' % world)
logging.warn('hello %s!' % world)
logging.info('hello %s!' % world)
logging.debug('hello %s!' % world)
| true | true |
f726be4df3aac1b1ae5b316e7cceff2b07cb123a | 38 | py | Python | files2md/structure_objects/structurable_directory/__init__.py | KacperKotlewski/file_structure_to_markdown | aad0e1c80f88e0b3d079cf242d43fdc4b7a369f7 | [
"MIT"
] | 1 | 2020-02-22T00:41:04.000Z | 2020-02-22T00:41:04.000Z | files2md/structure_objects/structurable_directory/__init__.py | KacperKotlewski/file_structure_to_markdown | aad0e1c80f88e0b3d079cf242d43fdc4b7a369f7 | [
"MIT"
] | null | null | null | files2md/structure_objects/structurable_directory/__init__.py | KacperKotlewski/file_structure_to_markdown | aad0e1c80f88e0b3d079cf242d43fdc4b7a369f7 | [
"MIT"
] | null | null | null | from .directoryObj import DirectoryObj | 38 | 38 | 0.894737 | from .directoryObj import DirectoryObj | true | true |
f726be65d2d58a2e1ead974e840eb9718283079e | 335 | py | Python | database/table_objects/ping_targets.py | Timo-Meinhof/friedrich-py | 025e45fe23aba0980762af779161626477c567b0 | [
"MIT"
] | 1 | 2021-08-07T12:18:48.000Z | 2021-08-07T12:18:48.000Z | database/table_objects/ping_targets.py | Timo-Meinhof/friedrich-py | 025e45fe23aba0980762af779161626477c567b0 | [
"MIT"
] | null | null | null | database/table_objects/ping_targets.py | Timo-Meinhof/friedrich-py | 025e45fe23aba0980762af779161626477c567b0 | [
"MIT"
] | null | null | null | class User:
def __init__(self, id: str, name: str, color: str, studon: str):
self.id = id
self.name = name
self.color = color
self.studon = studon
class Role:
def __init__(self, id: str, name: str, color: str):
self.id = id
self.name = name
self.color = color | 27.916667 | 69 | 0.540299 | class User:
def __init__(self, id: str, name: str, color: str, studon: str):
self.id = id
self.name = name
self.color = color
self.studon = studon
class Role:
def __init__(self, id: str, name: str, color: str):
self.id = id
self.name = name
self.color = color | true | true |
f726bec39c5c25f3abb108a6dd36bed1b16fe8f7 | 12,303 | py | Python | lenstronomy/PointSource/point_source_types.py | franyancr/lenstronomy | 3a7b33512a474bf1796d23276d9028b580580cf1 | [
"MIT"
] | null | null | null | lenstronomy/PointSource/point_source_types.py | franyancr/lenstronomy | 3a7b33512a474bf1796d23276d9028b580580cf1 | [
"MIT"
] | null | null | null | lenstronomy/PointSource/point_source_types.py | franyancr/lenstronomy | 3a7b33512a474bf1796d23276d9028b580580cf1 | [
"MIT"
] | null | null | null | import numpy as np
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
class Unlensed(object):
"""
class of a single point source in the image plane, aka star
parameters: ra_image, dec_image, point_amp
"""
def __init__(self):
pass
def image_position(self, kwargs_ps, kwargs_lens=None, **kwargs): # kwargs_lens=None, min_distance=0.01, search_window=5, precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
"""
:param ra_image:
:param dec_image:
:param point_amp:
:return:
"""
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
return np.array(ra_image), np.array(dec_image)
def source_position(self, kwargs_ps, kwargs_lens=None):
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
return np.array(ra_image), np.array(dec_image)
def image_amplitude(self, kwargs_ps, kwargs_lens=None, **kwargs): # , x_pos=None, y_pos=None, min_distance=0.01, search_window=5, precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
point_amp = kwargs_ps['point_amp']
return np.array(point_amp)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
point_amp = kwargs_ps['point_amp']
return np.array(point_amp)
def update_lens_model(self, lens_model_class):
pass
class LensedPositions(object):
"""
class of a single point source in the image plane, aka star
parameters: ra_image, dec_image, point_amp
"""
def __init__(self, lensModel, fixed_magnification=False, additional_image=False):
self._lensModel = lensModel
self._solver = LensEquationSolver(lensModel)
self._fixed_magnification = fixed_magnification
self._additional_image = additional_image
if fixed_magnification is True and additional_image is True:
Warning('The combination of fixed_magnification=True and additional_image=True is not optimal for the current computation.'
'If you see this warning, please approach the developers.')
def image_position(self, kwargs_ps, kwargs_lens, min_distance=0.01, search_window=5, precision_limit=10**(-10),
num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
"""
:param ra_image:
:param dec_image:
:param point_amp:
:return:
"""
if self._additional_image is True:
ra_source, dec_source = self.source_position(kwargs_ps, kwargs_lens)
ra_image, dec_image = self._solver.image_position_from_source(ra_source, dec_source, kwargs_lens,
min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
else:
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
return np.array(ra_image), np.array(dec_image)
def source_position(self, kwargs_ps, kwargs_lens):
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
x_source, y_source = self._lensModel.ray_shooting(ra_image, dec_image, kwargs_lens)
x_source = np.mean(x_source)
y_source = np.mean(y_source)
return np.array(x_source), np.array(y_source)
def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, **kwargs): # min_distance=0.01, search_window=5, precision_limit=10**(-10),num_iter_max=100, x_center=0, y_center=0):
if self._fixed_magnification:
if x_pos is not None and y_pos is not None:
ra_image, dec_image = x_pos, y_pos
else:
ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens)
mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['source_amp'] * np.abs(mag)
else:
point_amp = kwargs_ps['point_amp']
if x_pos is not None:
point_amp = _expand_to_array(point_amp, len(x_pos))
#if np.atleast_1d(point_amp):
# pass
return np.array(point_amp)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
if self._fixed_magnification:
source_amp = kwargs_ps['source_amp']
else:
ra_image, dec_image = kwargs_ps['ra_image'], kwargs_ps['dec_image']
mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['point_amp']
source_amp = np.mean(np.array(point_amp) / np.array(np.abs(mag)))
return np.array(source_amp)
def update_lens_model(self, lens_model_class):
self._lensModel = lens_model_class
self._solver = LensEquationSolver(lens_model_class)
class SourcePositions(object):
"""
class of a single point source in the image plane, aka star
parameters: ra_image, dec_image, point_amp
"""
def __init__(self, lensModel, fixed_magnification=True):
self._lensModel = lensModel
self._solver = LensEquationSolver(lensModel)
self._fixed_magnification = fixed_magnification
def image_position(self, kwargs_ps, kwargs_lens, min_distance=0.01, search_window=5, precision_limit=10**(-10),
num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
"""
:param ra_image:
:param dec_image:
:param point_amp:
:return:
"""
ra_source, dec_source = self.source_position(kwargs_ps, kwargs_lens)
ra_image, dec_image = self._solver.image_position_from_source(ra_source, dec_source, kwargs_lens,
min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
return ra_image, dec_image
def source_position(self, kwargs_ps, kwargs_lens=None):
ra_source = kwargs_ps['ra_source']
dec_source = kwargs_ps['dec_source']
return np.array(ra_source), np.array(dec_source)
def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, min_distance=0.01, search_window=5,
precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
if self._fixed_magnification:
if x_pos is not None and y_pos is not None:
ra_image, dec_image = x_pos, y_pos
else:
ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens, min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['source_amp'] * np.abs(mag)
else:
point_amp = kwargs_ps['point_amp']
if x_pos is not None:
point_amp = _expand_to_array(point_amp, len(x_pos))
return np.array(point_amp)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
if self._fixed_magnification:
source_amp = kwargs_ps['source_amp']
else:
ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens)
mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['point_amp']
source_amp = np.mean(np.array(point_amp) / np.array(mag))
return np.array(source_amp)
def update_lens_model(self, lens_model_class):
self._lensModel = lens_model_class
self._solver = LensEquationSolver(lens_model_class)
class PointSourceCached(object):
"""
"""
def __init__(self, point_source_model, save_cache=False):
self._model = point_source_model
self._save_cache = save_cache
def delete_lens_model_cache(self):
if hasattr(self, '_x_image'):
del self._x_image
if hasattr(self, '_y_image'):
del self._y_image
if hasattr(self, '_x_source'):
del self._x_source
if hasattr(self, '_y_source'):
del self._y_source
def set_save_cache(self, bool):
self._save_cache = bool
def update_lens_model(self, lens_model_class):
self._model.update_lens_model(lens_model_class)
def image_position(self, kwargs_ps, kwargs_lens=None, min_distance=0.05, search_window=10,
precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
"""
:param ra_image:
:param dec_image:
:param point_amp:
:return:
"""
if not self._save_cache or not hasattr(self, '_x_image') or not hasattr(self, '_y_image'):
self._x_image, self._y_image = self._model.image_position(kwargs_ps, kwargs_lens, min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
return self._x_image, self._y_image
def source_position(self, kwargs_ps, kwargs_lens=None):
if not self._save_cache or not hasattr(self, '_x_source') or not hasattr(self, '_y_source'):
self._x_source, self._y_source = self._model.source_position(kwargs_ps, kwargs_lens)
return self._x_source, self._y_source
def image_amplitude(self, kwargs_ps, kwargs_lens=None, min_distance=0.01, search_window=5, precision_limit=10**(-10),
num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
x_pos, y_pos = self.image_position(kwargs_ps, kwargs_lens, min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
return self._model.image_amplitude(kwargs_ps, kwargs_lens, x_pos=x_pos, y_pos=y_pos)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
return self._model.source_amplitude(kwargs_ps, kwargs_lens)
def _expand_to_array(array, num):
"""
:param array: float/int or numpy array
:param num: number of array entries expected in array
:return: array of size num
"""
if np.isscalar(array):
return np.ones(num) * array
elif len(array) < num:
out = np.zeros(num)
out[0:len(array)] = array
return out
else:
return array | 47.137931 | 231 | 0.599366 | import numpy as np
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
class Unlensed(object):
def __init__(self):
pass
def image_position(self, kwargs_ps, kwargs_lens=None, **kwargs):
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
return np.array(ra_image), np.array(dec_image)
def source_position(self, kwargs_ps, kwargs_lens=None):
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
return np.array(ra_image), np.array(dec_image)
def image_amplitude(self, kwargs_ps, kwargs_lens=None, **kwargs):
point_amp = kwargs_ps['point_amp']
return np.array(point_amp)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
point_amp = kwargs_ps['point_amp']
return np.array(point_amp)
def update_lens_model(self, lens_model_class):
pass
class LensedPositions(object):
def __init__(self, lensModel, fixed_magnification=False, additional_image=False):
self._lensModel = lensModel
self._solver = LensEquationSolver(lensModel)
self._fixed_magnification = fixed_magnification
self._additional_image = additional_image
if fixed_magnification is True and additional_image is True:
Warning('The combination of fixed_magnification=True and additional_image=True is not optimal for the current computation.'
'If you see this warning, please approach the developers.')
def image_position(self, kwargs_ps, kwargs_lens, min_distance=0.01, search_window=5, precision_limit=10**(-10),
num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
if self._additional_image is True:
ra_source, dec_source = self.source_position(kwargs_ps, kwargs_lens)
ra_image, dec_image = self._solver.image_position_from_source(ra_source, dec_source, kwargs_lens,
min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
else:
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
return np.array(ra_image), np.array(dec_image)
def source_position(self, kwargs_ps, kwargs_lens):
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
x_source, y_source = self._lensModel.ray_shooting(ra_image, dec_image, kwargs_lens)
x_source = np.mean(x_source)
y_source = np.mean(y_source)
return np.array(x_source), np.array(y_source)
def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, **kwargs):
if self._fixed_magnification:
if x_pos is not None and y_pos is not None:
ra_image, dec_image = x_pos, y_pos
else:
ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens)
mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['source_amp'] * np.abs(mag)
else:
point_amp = kwargs_ps['point_amp']
if x_pos is not None:
point_amp = _expand_to_array(point_amp, len(x_pos))
return np.array(point_amp)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
if self._fixed_magnification:
source_amp = kwargs_ps['source_amp']
else:
ra_image, dec_image = kwargs_ps['ra_image'], kwargs_ps['dec_image']
mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['point_amp']
source_amp = np.mean(np.array(point_amp) / np.array(np.abs(mag)))
return np.array(source_amp)
def update_lens_model(self, lens_model_class):
self._lensModel = lens_model_class
self._solver = LensEquationSolver(lens_model_class)
class SourcePositions(object):
def __init__(self, lensModel, fixed_magnification=True):
self._lensModel = lensModel
self._solver = LensEquationSolver(lensModel)
self._fixed_magnification = fixed_magnification
def image_position(self, kwargs_ps, kwargs_lens, min_distance=0.01, search_window=5, precision_limit=10**(-10),
num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
ra_source, dec_source = self.source_position(kwargs_ps, kwargs_lens)
ra_image, dec_image = self._solver.image_position_from_source(ra_source, dec_source, kwargs_lens,
min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
return ra_image, dec_image
def source_position(self, kwargs_ps, kwargs_lens=None):
ra_source = kwargs_ps['ra_source']
dec_source = kwargs_ps['dec_source']
return np.array(ra_source), np.array(dec_source)
def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, min_distance=0.01, search_window=5,
precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
if self._fixed_magnification:
if x_pos is not None and y_pos is not None:
ra_image, dec_image = x_pos, y_pos
else:
ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens, min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['source_amp'] * np.abs(mag)
else:
point_amp = kwargs_ps['point_amp']
if x_pos is not None:
point_amp = _expand_to_array(point_amp, len(x_pos))
return np.array(point_amp)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
if self._fixed_magnification:
source_amp = kwargs_ps['source_amp']
else:
ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens)
mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['point_amp']
source_amp = np.mean(np.array(point_amp) / np.array(mag))
return np.array(source_amp)
def update_lens_model(self, lens_model_class):
self._lensModel = lens_model_class
self._solver = LensEquationSolver(lens_model_class)
class PointSourceCached(object):
def __init__(self, point_source_model, save_cache=False):
self._model = point_source_model
self._save_cache = save_cache
def delete_lens_model_cache(self):
if hasattr(self, '_x_image'):
del self._x_image
if hasattr(self, '_y_image'):
del self._y_image
if hasattr(self, '_x_source'):
del self._x_source
if hasattr(self, '_y_source'):
del self._y_source
def set_save_cache(self, bool):
self._save_cache = bool
def update_lens_model(self, lens_model_class):
self._model.update_lens_model(lens_model_class)
def image_position(self, kwargs_ps, kwargs_lens=None, min_distance=0.05, search_window=10,
precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
if not self._save_cache or not hasattr(self, '_x_image') or not hasattr(self, '_y_image'):
self._x_image, self._y_image = self._model.image_position(kwargs_ps, kwargs_lens, min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
return self._x_image, self._y_image
def source_position(self, kwargs_ps, kwargs_lens=None):
if not self._save_cache or not hasattr(self, '_x_source') or not hasattr(self, '_y_source'):
self._x_source, self._y_source = self._model.source_position(kwargs_ps, kwargs_lens)
return self._x_source, self._y_source
def image_amplitude(self, kwargs_ps, kwargs_lens=None, min_distance=0.01, search_window=5, precision_limit=10**(-10),
num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):
x_pos, y_pos = self.image_position(kwargs_ps, kwargs_lens, min_distance=min_distance,
search_window=search_window,
precision_limit=precision_limit,
num_iter_max=num_iter_max, x_center=x_center,
y_center=y_center, magnification_limit=magnification_limit)
return self._model.image_amplitude(kwargs_ps, kwargs_lens, x_pos=x_pos, y_pos=y_pos)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
return self._model.source_amplitude(kwargs_ps, kwargs_lens)
def _expand_to_array(array, num):
if np.isscalar(array):
return np.ones(num) * array
elif len(array) < num:
out = np.zeros(num)
out[0:len(array)] = array
return out
else:
return array | true | true |
f726bf47700172a8e614d87926cc30cfdb491818 | 2,508 | py | Python | linkedlist.py | jerrybelmonte/DataStructures-Python | 553a156f685d83291e73e0c35b85167e6b114379 | [
"MIT"
] | null | null | null | linkedlist.py | jerrybelmonte/DataStructures-Python | 553a156f685d83291e73e0c35b85167e6b114379 | [
"MIT"
] | null | null | null | linkedlist.py | jerrybelmonte/DataStructures-Python | 553a156f685d83291e73e0c35b85167e6b114379 | [
"MIT"
] | null | null | null | # LinkedList implementation using a helper Element class
class Element(object):
def __init__(self, value):
self.value = value
self.next = None
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def append(self, new_element):
current = self.head
if self.head:
while current.next:
current = current.next
current.next = new_element
else:
self.head = new_element
def get_position(self, position):
index = 1
current = self.head
if self.head:
while current.next and index < position:
current = current.next
index += 1
if index == position:
return current
else:
return None
def insert(self, new_element, position):
index = 1
current = self.head
previous = current
if position != 1:
while current.next and index < position:
previous = current
current = current.next
index += 1
if index == position:
new_element.next = current
previous.next = new_element
else:
if self.head:
new_element.next = current
self.head = new_element
else:
self.head = new_element
def delete(self, value):
current = self.head
if self.head:
if current.value == value:
self.head = current.next
current.next = None
else:
while current.next:
previous = current
current = current.next
if current.value == value:
previous.next = current.next
current.next = None
# Test cases
# Set up some Elements
e1 = Element(1)
e2 = Element(2)
e3 = Element(3)
e4 = Element(4)
# Start setting up a LinkedList
ll = LinkedList(e1)
ll.append(e2)
ll.append(e3)
# Test get_position
# Output should print 3
print(ll.head.next.next.value)
# Output should also print 3
print(ll.get_position(3).value)
# Test insert
ll.insert(e4, 3)
# Output should print 4 now
print(ll.get_position(3).value)
# Test delete
ll.delete(1)
# Output should print 2 now
print(ll.get_position(1).value)
# Output should print 4 now
print(ll.get_position(2).value)
# Should print 3 now
print(ll.get_position(3).value)
| 25.591837 | 56 | 0.555821 |
class Element(object):
def __init__(self, value):
self.value = value
self.next = None
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def append(self, new_element):
current = self.head
if self.head:
while current.next:
current = current.next
current.next = new_element
else:
self.head = new_element
def get_position(self, position):
index = 1
current = self.head
if self.head:
while current.next and index < position:
current = current.next
index += 1
if index == position:
return current
else:
return None
def insert(self, new_element, position):
index = 1
current = self.head
previous = current
if position != 1:
while current.next and index < position:
previous = current
current = current.next
index += 1
if index == position:
new_element.next = current
previous.next = new_element
else:
if self.head:
new_element.next = current
self.head = new_element
else:
self.head = new_element
def delete(self, value):
current = self.head
if self.head:
if current.value == value:
self.head = current.next
current.next = None
else:
while current.next:
previous = current
current = current.next
if current.value == value:
previous.next = current.next
current.next = None
e1 = Element(1)
e2 = Element(2)
e3 = Element(3)
e4 = Element(4)
ll = LinkedList(e1)
ll.append(e2)
ll.append(e3)
print(ll.head.next.next.value)
print(ll.get_position(3).value)
ll.insert(e4, 3)
print(ll.get_position(3).value)
ll.delete(1)
print(ll.get_position(1).value)
print(ll.get_position(2).value)
print(ll.get_position(3).value)
| true | true |
f726c0fcd2d691576e784c9f9ad8d9f54ceb42ec | 4,000 | py | Python | benchmarks/supervectorizer_tuning.py | dirty-cat/categorical-encoding | fb0a1c4216533034e7516efc0698c7e4477b0243 | [
"BSD-3-Clause"
] | 374 | 2018-03-16T09:00:55.000Z | 2022-03-31T14:07:43.000Z | benchmarks/supervectorizer_tuning.py | dirty-cat/categorical-encoding | fb0a1c4216533034e7516efc0698c7e4477b0243 | [
"BSD-3-Clause"
] | 195 | 2018-03-14T13:56:25.000Z | 2022-03-31T11:49:49.000Z | benchmarks/supervectorizer_tuning.py | dirty-cat/categorical-encoding | fb0a1c4216533034e7516efc0698c7e4477b0243 | [
"BSD-3-Clause"
] | 52 | 2018-03-13T13:23:01.000Z | 2022-03-17T09:56:56.000Z | """
Performs a GridSearch to find the best parameters for the SuperVectorizer
among a selection.
"""
import logging
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from dirty_cat import SuperVectorizer
from dirty_cat.datasets import fetch_open_payments, fetch_drug_directory, \
fetch_road_safety, fetch_midwest_survey, fetch_medical_charge, \
fetch_employee_salaries, fetch_traffic_violations
from pathlib import Path
from functools import wraps
from datetime import datetime
from typing import List, Tuple
def get_classification_datasets() -> List[Tuple[dict, str]]:
return [
(fetch_open_payments(), 'open_payments'),
# (fetch_drug_directory(), 'drug_directory),
(fetch_road_safety(), 'road_safety'),
(fetch_midwest_survey(), 'midwest_survey'),
(fetch_traffic_violations(), 'traffic_violations'),
]
def get_regression_datasets() -> List[Tuple[dict, str]]:
return [
(fetch_medical_charge(), 'medical_charge'),
(fetch_employee_salaries(), 'employee_salaries'),
]
def get_dataset(info) -> Tuple[pd.DataFrame, pd.Series]:
df = pd.read_csv(info['path'], **info['read_csv_kwargs'])
y = df[info['y']]
X = df.drop(info['y'], axis=1).astype(str)
return X, y
def set_logging(func):
@wraps(func)
def wrapper(*args, **kwargs):
logging_level = logging.DEBUG
logger = logging.getLogger()
logger.setLevel(logging_level)
formatter = logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s')
formatter.datefmt = '%m/%d/%Y %H:%M:%S'
path = Path(__file__).parent / f'tuning_{str(datetime.now())[:10]}.log'
fh = logging.FileHandler(filename=path, mode='w')
fh.setLevel(logging_level)
fh.setFormatter(formatter)
# sh = logging.StreamHandler(sys.stdout)
# sh.setLevel(logging_level)
# sh.setFormatter(formatter)
logger.addHandler(fh)
# logger.addHandler(sh)
return func(*args, **kwargs)
return wrapper
@set_logging
def main():
logging.info('Launching !')
card_possibilities = [20, 30, 40, 50]
n_comp_possibilities = [10, 30, 50]
logging.debug('Creating pipelines')
regression_pipeline = Pipeline([
('sv', SuperVectorizer()),
('estimator', RandomForestRegressor()),
])
classification_pipeline = Pipeline([
('sv', SuperVectorizer()),
('estimator', RandomForestClassifier()),
])
logging.debug(f'With cardinality possibilities: {card_possibilities} '
f'and n_components possibilities: {n_comp_possibilities}')
for pipeline, datasets in zip(
[
regression_pipeline,
classification_pipeline,
],
[
get_regression_datasets(),
get_classification_datasets(),
]
):
for info, name in datasets:
X, y = get_dataset(info)
if name != 'traffic_violations':
continue
csv_path = Path('.').resolve() / f'{name}_results.csv'
if csv_path.exists():
# If the results already exist, we'll skip to the next
logging.debug(f'Skipping {name} as {csv_path!s} was found')
continue
logging.debug(f'Running search on {name}')
grid = GridSearchCV(
estimator=pipeline,
param_grid={
'sv__cardinality_threshold': card_possibilities,
'sv__high_card_str_transformer__n_components': n_comp_possibilities,
},
n_jobs=30,
)
grid.fit(X, y)
df = pd.DataFrame(grid.cv_results_)
df.to_csv(csv_path)
logging.info(f'Saved search results in {csv_path!s}')
if __name__ == '__main__':
main()
| 29.850746 | 88 | 0.6265 |
import logging
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from dirty_cat import SuperVectorizer
from dirty_cat.datasets import fetch_open_payments, fetch_drug_directory, \
fetch_road_safety, fetch_midwest_survey, fetch_medical_charge, \
fetch_employee_salaries, fetch_traffic_violations
from pathlib import Path
from functools import wraps
from datetime import datetime
from typing import List, Tuple
def get_classification_datasets() -> List[Tuple[dict, str]]:
return [
(fetch_open_payments(), 'open_payments'),
(fetch_road_safety(), 'road_safety'),
(fetch_midwest_survey(), 'midwest_survey'),
(fetch_traffic_violations(), 'traffic_violations'),
]
def get_regression_datasets() -> List[Tuple[dict, str]]:
return [
(fetch_medical_charge(), 'medical_charge'),
(fetch_employee_salaries(), 'employee_salaries'),
]
def get_dataset(info) -> Tuple[pd.DataFrame, pd.Series]:
df = pd.read_csv(info['path'], **info['read_csv_kwargs'])
y = df[info['y']]
X = df.drop(info['y'], axis=1).astype(str)
return X, y
def set_logging(func):
@wraps(func)
def wrapper(*args, **kwargs):
logging_level = logging.DEBUG
logger = logging.getLogger()
logger.setLevel(logging_level)
formatter = logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s')
formatter.datefmt = '%m/%d/%Y %H:%M:%S'
path = Path(__file__).parent / f'tuning_{str(datetime.now())[:10]}.log'
fh = logging.FileHandler(filename=path, mode='w')
fh.setLevel(logging_level)
fh.setFormatter(formatter)
# sh = logging.StreamHandler(sys.stdout)
# sh.setLevel(logging_level)
# sh.setFormatter(formatter)
logger.addHandler(fh)
# logger.addHandler(sh)
return func(*args, **kwargs)
return wrapper
@set_logging
def main():
logging.info('Launching !')
card_possibilities = [20, 30, 40, 50]
n_comp_possibilities = [10, 30, 50]
logging.debug('Creating pipelines')
regression_pipeline = Pipeline([
('sv', SuperVectorizer()),
('estimator', RandomForestRegressor()),
])
classification_pipeline = Pipeline([
('sv', SuperVectorizer()),
('estimator', RandomForestClassifier()),
])
logging.debug(f'With cardinality possibilities: {card_possibilities} '
f'and n_components possibilities: {n_comp_possibilities}')
for pipeline, datasets in zip(
[
regression_pipeline,
classification_pipeline,
],
[
get_regression_datasets(),
get_classification_datasets(),
]
):
for info, name in datasets:
X, y = get_dataset(info)
if name != 'traffic_violations':
continue
csv_path = Path('.').resolve() / f'{name}_results.csv'
if csv_path.exists():
# If the results already exist, we'll skip to the next
logging.debug(f'Skipping {name} as {csv_path!s} was found')
continue
logging.debug(f'Running search on {name}')
grid = GridSearchCV(
estimator=pipeline,
param_grid={
'sv__cardinality_threshold': card_possibilities,
'sv__high_card_str_transformer__n_components': n_comp_possibilities,
},
n_jobs=30,
)
grid.fit(X, y)
df = pd.DataFrame(grid.cv_results_)
df.to_csv(csv_path)
logging.info(f'Saved search results in {csv_path!s}')
if __name__ == '__main__':
main()
| true | true |
f726c100e03118fe63b2ed7bad2293c84c8e95ee | 282 | py | Python | scripts/batch_stop.py | oretoise/slate | cfbf629417680cd0fe6d745f7d8a50275aef00a9 | [
"MIT"
] | null | null | null | scripts/batch_stop.py | oretoise/slate | cfbf629417680cd0fe6d745f7d8a50275aef00a9 | [
"MIT"
] | null | null | null | scripts/batch_stop.py | oretoise/slate | cfbf629417680cd0fe6d745f7d8a50275aef00a9 | [
"MIT"
] | null | null | null | import pyautogui
pyautogui.PAUSE = 5
while True:
# Click first email in list.
pyautogui.click(640, 345)
# Stop it
pyautogui.click(1760, 430)
pyautogui.click(980, 1020)
pyautogui.typewrite("STOP")
pyautogui.press('enter')
pyautogui.click(580, 220) | 18.8 | 32 | 0.670213 | import pyautogui
pyautogui.PAUSE = 5
while True:
pyautogui.click(640, 345)
pyautogui.click(1760, 430)
pyautogui.click(980, 1020)
pyautogui.typewrite("STOP")
pyautogui.press('enter')
pyautogui.click(580, 220) | true | true |
f726c15fd0c5805fefb311e2ad443ac3c19afea2 | 802 | py | Python | manage.py | guanqingqi/dove | f8681f144e44369bf9e0c9ea76e1994920a14cfb | [
"Apache-2.0"
] | null | null | null | manage.py | guanqingqi/dove | f8681f144e44369bf9e0c9ea76e1994920a14cfb | [
"Apache-2.0"
] | null | null | null | manage.py | guanqingqi/dove | f8681f144e44369bf9e0c9ea76e1994920a14cfb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings module, then hand sys.argv to the CLI dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dove.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| 34.869565 | 77 | 0.640898 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dove.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| true | true |
f726c164a1629b8cd7489e23860a47602ec42ab6 | 13,533 | py | Python | ph5/utilities/dumpsgy.py | kujaku11/PH5_py3 | bd0ae3be843bae70f08b03d3d95913473288c3a6 | [
"MIT"
] | null | null | null | ph5/utilities/dumpsgy.py | kujaku11/PH5_py3 | bd0ae3be843bae70f08b03d3d95913473288c3a6 | [
"MIT"
] | null | null | null | ph5/utilities/dumpsgy.py | kujaku11/PH5_py3 | bd0ae3be843bae70f08b03d3d95913473288c3a6 | [
"MIT"
] | null | null | null | #!/usr/bin/env pnpython3
#
# Simple program to read and display SEG-Y file
#
# Steve Azevedo
#
import argparse
import logging
import os
from ph5.core import segy_h, ibmfloat, ebcdic
import construct
PROG_VERSION = '2019.14'
LOGGER = logging.getLogger(__name__)
SAMPLE_LENGTH = {1: 4, 2: 4, 3: 2, 4: 4, 5: 4, 8: 1}
SIZEOF = {"lineSeq": 32, "reelSeq": 32, "event_number": 32,
"channel_number": 32, "energySourcePt": 32, "cdpEns": 32,
"traceInEnsemble": 32,
"traceID": 16, "vertSum": 16, "horSum": 16, "dataUse": 16,
"sourceToRecDist": 32, "recElevation": 32,
"sourceSurfaceElevation": 32,
"sourceDepth": 32, "datumElevRec": 32, "datumElevSource": 32,
"sourceWaterDepth": 32, "recWaterDepth": 32, "elevationScale": 16,
"coordScale": 16, "sourceLongOrX": 32, "sourceLatOrY": 32,
"recLongOrX": 32, "recLatOrY": 32, "coordUnits": 16,
"weatheringVelocity": 16,
"subWeatheringVelocity": 16, "sourceUpholeTime": 16,
"recUpholeTime": 16, "sourceStaticCor": 16, "recStaticCor": 16,
"totalStatic": 16,
"lagTimeA": 16, "lagTimeB": 16, "delay": 16, "muteStart": 16,
"muteEnd": 16, "sampleLength": 16, "deltaSample": 16, "gainType": 16,
"gainConst": 16, "initialGain": 16, "correlated": 16,
"sweepStart": 16, "sweepEnd": 16, "sweepLength": 16, "sweepType": 16,
"sweepTaperAtStart": 16, "sweepTaperAtEnd": 16, "taperType": 16,
"aliasFreq": 16, "aliasSlope": 16, "notchFreq": 16, "notchSlope": 16,
"lowCutFreq": 16, "hiCutFreq": 16, "lowCutSlope": 16,
"hiCutSlope": 16, "year": 16, "day": 16, "hour": 16, "minute": 16,
"second": 16,
"timeBasisCode": 16, "traceWeightingFactor": 16, "phoneRollPos1": 16,
"phoneFirstTrace": 16, "phoneLastTrace": 16, "gapSize": 16,
"taperOvertravel": 16, "station_name": 48, "sensor_serial": 64,
"channel_name": 16, "totalStaticHi": 16, "samp_rate": 32,
"data_form": 16,
"m_secs": 16, "trigyear": 16, "trigday": 16, "trighour": 16,
"trigminute": 16, "trigsecond": 16, "trigmills": 16, "scale_fac": 32,
"inst_no": 16, "unassigned": 16, "num_samps": 32, "max": 32,
"min": 32, "start_usec": 32, "shot_size": 16, "shot_year": 16,
"shot_doy": 16,
"shot_hour": 16, "shot_minute": 16, "shot_second": 16, "shot_us": 32,
"si_override": 32, "sensor_azimuth": 16, "sensor_inclination": 16,
"lmo_ms": 32, "lmo_flag": 16, "inst_type": 16, "correction": 16,
"azimuth": 16, "sensor_type": 16, "sensor_sn": 16, "das_sn": 16,
"empty1": 16,
"samples": 32, "empty2": 32, "clock_drift": 16, "empty3": 16,
"waterDelay": 32, "startMute": 32, "endMute": 32, "sampleInt": 32,
"waterBottomTime": 32, "endOfRp": 32, "dummy1": 32, "dummy2": 32,
"dummy3": 32, "dummy4": 32, "dummy5": 32, "dummy6": 32, "dummy7": 32,
"dummy8": 32, "dummy9": 32, "Xcoor": 32, "Ycoor": 32, "Inn": 32,
"Cnn": 32, "Spn": 32, "Scal": 16, "Tvmu": 16, "Tucmant": 32,
"Tucexp": 16,
"Tdu": 16, "Dti": 16, "Tscaler": 16, "Sto": 16, "Sed": 48,
"Smsmant": 32, "Smsexp": 16, "Smu": 16, "num_samps": 32,
"samp_rate": 32, "Revision": 16,
"ShotID": 32, "AuxChanSig": 8, "AuxChanID": 8, "SPL": 32, "SPS": 32,
"unass01": 16, "unass02": 16, "SenInt": 8, "VectSens": 8,
"HorAz": 16,
"VertAngle": 16,
"SourceType": 8, "SensorType": 8, "AuxChanSetType": 8,
"NoiseEditType": 8, "NoiseEditGate": 16, "SystemDevice": 8, "FSU": 3,
"DevChan": 8, "SourceCoCo": 8,
"DevStatusBits": 8, "BITTest": 8, "SweepPhaseRot": 16, "unass03": 8,
"BoxFun": 8, "SourceEffortM": 32, "SourceEffortE": 16,
"SourceUnits": 16,
"EventType": 8, "SensorTypeID": 8, "SensorSerial": 3,
"SensorVersion": 8, "SensorRev": 8, "VOR": 8, }
def get_args():
    """Parse command-line options and initialize the module globals.

    Side effects: opens the input SEG-Y file into the global FH and sets
    TYPE, PRINT, L, T, F, ENDIAN and EBCDIC from the parsed arguments.
    """
    global FH, TYPE, PRINT, L, T, F, ENDIAN, EBCDIC
    FH = None  # open file handle on the input SEG-Y file
    TYPE = None  # extended trace header style: U/P/S/N/I
    PRINT = False  # if True, decode and print trace samples
    L = None  # bytes per trace (command-line override of binary header)
    T = None  # traces per ensemble (command-line override)
    F = None  # trace sample format code (command-line override)
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.usage = "Version: {0} Usage: dumpsgy [options]".format(
        PROG_VERSION)
    parser.add_argument("-f", action="store", dest="infile", type=str,
                        required=True)
    parser.add_argument("-t", action="store", dest="ttype",
                        choices=['U', 'P', 'S', 'N', 'I'],
                        help=("Extended trace header style. U => USGS Menlo, "
                              "P => PASSCAL, S => SEG, I => SIOSEIS, "
                              "N => iNova FireFly"), default='S')
    parser.add_argument("-p", action="store_true",
                        dest="print_true", default=False)
    parser.add_argument("-L", action="store",
                        dest="bytes_per_trace", type=int)
    parser.add_argument("-T", action="store",
                        dest="traces_per_ensemble", type=int)
    parser.add_argument("-F", action="store", dest="trace_format", type=int,
                        help=("1 = IBM - 4 bytes, 2 = INT - 4 bytes, "
                              "3 = INT - 2 bytes, 5 = IEEE - 4 bytes, "
                              "8 = INT - 1 byte"))
    parser.add_argument("-e", action="store", dest="endian",
                        type=str, default='big',
                        help="Endianess: 'big' or 'little'. Default = 'big'")
    parser.add_argument("-i", action="store_false", dest="ebcdic",
                        default=True, help="EBCDIC textural header.")
    args = parser.parse_args()
    FH = open(args.infile, 'rb')
    TYPE = args.ttype
    PRINT = args.print_true
    L = args.bytes_per_trace
    T = args.traces_per_ensemble
    F = args.trace_format
    ENDIAN = args.endian
    EBCDIC = args.ebcdic
def read_text_header():
    """Read the 3200-byte SEG-Y textural (card image) header from the
    global file handle FH and return it parsed as a segy_h.Text container."""
    buf = FH.read(3200)
    t = segy_h.Text()
    return t.parse(buf)
def last_extended_header(container):
    ''' Return True if this contains an EndText stanza? '''
    import re
    # Matches a "((SEG: EndText))"-style stanza, case-insensitive on "EndText".
    lastRE = re.compile(r".*\(\(.*SEG\:.*[Ee][Nn][Dd][Tt][Ee][Xx][Tt].*\)\).*")
    keys = segy_h.Text().__keys__
    for k in keys:
        # eval() fetches the attribute named by k from the parsed
        # container (getattr would do the same without eval).
        what = "container.{0}".format(k)
        if EBCDIC:
            t = ebcdic.EbcdicToAscii(eval(what))
        else:
            t = eval(what)
        if lastRE.match(t):
            return True
    return False
def print_text_header(container):
    """Print every card of the textural header.

    Side effect: if the global TYPE is still unset, sniff card _38_ for a
    vendor keyword (MENLO/PASSCAL/SEG/SIOSEIS) and set TYPE accordingly.
    """
    global TYPE
    keys = segy_h.Text().__keys__
    print "--------------- Textural Header ---------------"
    for k in keys:
        # eval() fetches the attribute named by k from the container.
        what = "container.{0}".format(k)
        if EBCDIC:
            print "{0}\t-\t{1:s}".format(k, ebcdic.EbcdicToAscii(eval(what)))
        else:
            print "{0}\t-\t{1:s}".format(k, eval(what))
        if TYPE is None:
            if k == '_38_':
                try:
                    if EBCDIC:
                        s = ebcdic.EbcdicToAscii(eval(what))
                    else:
                        s = eval(what)
                    try:
                        # Second whitespace-separated field names the vendor.
                        flds = s.split()
                        if flds[1] == 'MENLO':
                            TYPE = 'U'
                        elif flds[1] == 'PASSCAL':
                            TYPE = 'P'
                        elif flds[1] == 'SEG':
                            TYPE = 'S'
                        elif flds[1] == 'SIOSEIS':
                            TYPE = 'I'
                        else:
                            TYPE = 'S'
                    except BaseException:
                        pass
                except BaseException:
                    TYPE = 'P'
def read_binary_header():
    """Read the 400-byte SEG-Y binary (reel) header from FH and return it
    parsed as a segy_h.Reel container, or None if parsing fails."""
    buf = FH.read(400)
    b = segy_h.Reel(ENDIAN)
    ret = None
    try:
        ret = b.parse(buf)
    except Exception as e:
        # Parse failures are logged and reported to the caller as None.
        LOGGER.error(e)
    return ret
def print_binary_header(container):
    """Print each field of the parsed binary header; no-op if container
    is None (parse failure)."""
    if not container:
        return
    keys = segy_h.Reel().__keys__
    print "---------- Binary Header ----------"
    for k in keys:
        # eval() fetches the attribute named by k from the container.
        what = "container.{0}".format(k)
        print "{0:<20}\t---\t{1}".format(k, eval(what))
def read_trace_header():
    """Read the 180-byte standard portion of a trace header from FH and
    return it parsed as a segy_h.Trace container."""
    buf = FH.read(180)
    t = segy_h.Trace(ENDIAN)
    return t.parse(buf)
def print_trace_header(container):
    """Print each trace-header field with its byte-offset range.

    tt tracks the running byte offset; it is set to the sentinel 9999
    once any field is missing from SIZEOF, after which offsets print
    as '_ - _'.
    """
    keys = segy_h.Trace().__keys__
    tt = 0
    print "---------- Trace Header ----------"
    for k in keys:
        what = "container.{0}".format(k)
        try:
            if tt == 9999:
                raise
            s = SIZEOF[k] / 8  # field width in bytes (SIZEOF is in bits)
            foffset = "{0:<3} - {1:>3}".format(tt, tt + s - 1)
            tt += s
        except BaseException:
            tt = 9999
            foffset = "{0:<3} - {1:>3}".format('_', '_')
        print "{2} {0:<20}\t---\t{1}".format(k, eval(what), foffset)
def read_extended_header():
    """Read the 60-byte extended portion of a trace header from FH and
    parse it with the vendor-specific layout selected by the global TYPE
    (U=Menlo, S=Seg, P=Passcal, I=Sioseis, N=iNova).  Returns None for an
    unknown TYPE; note the 60 bytes are consumed regardless.
    """
    buf = FH.read(60)
    if TYPE == 'U':
        e = segy_h.Menlo(ENDIAN)
    elif TYPE == 'S':
        e = segy_h.Seg(ENDIAN)
    elif TYPE == 'P':
        e = segy_h.Passcal(ENDIAN)
    elif TYPE == 'I':
        e = segy_h.Sioseis(ENDIAN)
    elif TYPE == 'N':
        e = segy_h.iNova(ENDIAN)
    else:
        return None
    return e.parse(buf)
def print_extended_header(container):
    """Print each extended-header field with its byte-offset range.

    The extended header starts at trace-header byte 180; offsets use the
    same 9999 sentinel scheme as print_trace_header once a field width
    is unknown or sub-byte.
    """
    if TYPE == 'U':
        keys = segy_h.Menlo().__keys__
    elif TYPE == 'S':
        keys = segy_h.Seg().__keys__
    elif TYPE == 'P':
        keys = segy_h.Passcal().__keys__
    elif TYPE == 'I':
        keys = segy_h.Sioseis().__keys__
    elif TYPE == 'N':
        keys = segy_h.iNova().__keys__
    else:
        return None
    tt = 180  # extended header begins at byte 180 of the trace header
    print "---------- Extended Header ----------"
    for k in keys:
        what = "container.{0}".format(k)
        try:
            if tt == 9999:
                raise
            s = SIZEOF[k] / 8  # field width in bytes (SIZEOF is in bits)
            if s < 1:
                raise
            foffset = "{0:<3} - {1:>3}".format(tt, tt + s - 1)
            tt += s
        except BaseException:
            tt = 9999
            foffset = "{0:<3} - {1:>3}".format('_', '_')
        print "{2} {0:<20}\t---\t{1}".format(k, eval(what), foffset)
def read_trace(n, l, f=5):
    """Read n samples of l bytes each from FH and decode per format f.

    f follows the SEG-Y binary-header sample-format codes handled here:
    1=IBM float, 2=4-byte int, 3=2-byte int, 5=IEEE float, 8=1-byte int.
    Returns the decoded samples as a list when the global PRINT is True;
    otherwise the n*l bytes are skipped and an empty list is returned.
    """
    ret = []
    if PRINT is True:
        for i in range(n):
            buf = FH.read(l)
            # IBM floats - 4 byte - Must be big endian
            if f == 1:
                ret.append(construct.BFloat32(
                    "x").parse(ibmfloat.ibm2ieee32(buf)))
            # INT - 4 byte or 2 byte
            elif f == 2:
                if ENDIAN == 'little':
                    # Swap 4 byte
                    b = construct.SLInt32("x").parse(buf)
                else:
                    b = construct.SBInt32("x").parse(buf)
                ret.append(b)
            elif f == 3:
                if ENDIAN == 'little':
                    # Swap 2 byte
                    b = construct.SLInt16("x").parse(buf)
                else:
                    b = construct.SBInt16("x").parse(buf)
                ret.append(b)
            # IEEE floats - 4 byte
            elif f == 5:
                if ENDIAN == 'little':
                    # Swap 4 byte
                    b = construct.LFloat32("x").parse(buf)
                else:
                    b = construct.BFloat32("x").parse(buf)
                ret.append(b)
            # INT - 1 byte
            elif f == 8:
                ret.append(construct.SBInt8("x").parse(buf))
    else:
        FH.read(n * l)
    return ret
def isEOF():
    """Return True when fewer than 240 bytes (one trace header) remain in FH.

    Probes the global file handle by reading 240 bytes and seeking back,
    so the file position is unchanged when a full header is available.
    """
    try:
        buf = FH.read(240)
        # read() returns the data, not a byte count, so compare the
        # length.  The previous `n != 240` compared bytes to int, which
        # is always true, making every call report EOF after the first
        # ensemble.
        if len(buf) != 240:
            raise EOFError
        FH.seek(-240, os.SEEK_CUR)
        return False
    except EOFError:
        # Short read: we are at (or within 240 bytes of) end of file.
        return True
def main():
    """Entry point: dump textural, binary, trace and extended headers
    (and optionally trace samples) of the SEG-Y file named on the
    command line."""
    global L, F, T
    get_args()
    text_container = read_text_header()
    print_text_header(text_container)
    binary_container = read_binary_header()
    print_binary_header(binary_container)
    if binary_container:
        # Number of Extended Textural Headers
        nt = binary_container.extxt
        # Samples per trace
        n = binary_container.hns
        # Trace sample format
        if F is None:
            F = binary_container.format
        # Bytes per sample
        try:
            ll = SAMPLE_LENGTH[binary_container.format]
        except KeyError:
            ll = 4
        # Bytes per trace
        if L is None:
            L = ll * n
        else:
            n = int(L) / ll
        # Traces per record
        if T is None:
            T = binary_container.ntrpr
    else:
        # NOTE(review): nt is not set on this branch, so the
        # `if nt > 0` below raises NameError when the binary header
        # fails to parse -- confirm and initialize nt here if so.
        T = 1
        n = ll = F = 0
    # Print Extended Textural Headers
    if nt > 0:
        for x in range(nt):
            text_container = read_text_header()
            print_text_header(text_container)
    elif nt == -1:
        # -1 means "unknown count": read until an EndText stanza appears.
        while True:
            text_container = read_text_header()
            print_text_header(text_container)
            if last_extended_header(text_container):
                break
    while True:
        for t in range(T):
            trace_container = read_trace_header()
            extended_header = read_extended_header()
            # print t,
            print_trace_header(trace_container)
            print_extended_header(extended_header)
            trace = read_trace(n, ll, F)
            if trace:
                print '------------------------'
                for t in trace:
                    print t
        if isEOF():
            break
| 31.545455 | 79 | 0.495677 |
import argparse
import logging
import os
from ph5.core import segy_h, ibmfloat, ebcdic
import construct
PROG_VERSION = '2019.14'
LOGGER = logging.getLogger(__name__)
SAMPLE_LENGTH = {1: 4, 2: 4, 3: 2, 4: 4, 5: 4, 8: 1}
SIZEOF = {"lineSeq": 32, "reelSeq": 32, "event_number": 32,
"channel_number": 32, "energySourcePt": 32, "cdpEns": 32,
"traceInEnsemble": 32,
"traceID": 16, "vertSum": 16, "horSum": 16, "dataUse": 16,
"sourceToRecDist": 32, "recElevation": 32,
"sourceSurfaceElevation": 32,
"sourceDepth": 32, "datumElevRec": 32, "datumElevSource": 32,
"sourceWaterDepth": 32, "recWaterDepth": 32, "elevationScale": 16,
"coordScale": 16, "sourceLongOrX": 32, "sourceLatOrY": 32,
"recLongOrX": 32, "recLatOrY": 32, "coordUnits": 16,
"weatheringVelocity": 16,
"subWeatheringVelocity": 16, "sourceUpholeTime": 16,
"recUpholeTime": 16, "sourceStaticCor": 16, "recStaticCor": 16,
"totalStatic": 16,
"lagTimeA": 16, "lagTimeB": 16, "delay": 16, "muteStart": 16,
"muteEnd": 16, "sampleLength": 16, "deltaSample": 16, "gainType": 16,
"gainConst": 16, "initialGain": 16, "correlated": 16,
"sweepStart": 16, "sweepEnd": 16, "sweepLength": 16, "sweepType": 16,
"sweepTaperAtStart": 16, "sweepTaperAtEnd": 16, "taperType": 16,
"aliasFreq": 16, "aliasSlope": 16, "notchFreq": 16, "notchSlope": 16,
"lowCutFreq": 16, "hiCutFreq": 16, "lowCutSlope": 16,
"hiCutSlope": 16, "year": 16, "day": 16, "hour": 16, "minute": 16,
"second": 16,
"timeBasisCode": 16, "traceWeightingFactor": 16, "phoneRollPos1": 16,
"phoneFirstTrace": 16, "phoneLastTrace": 16, "gapSize": 16,
"taperOvertravel": 16, "station_name": 48, "sensor_serial": 64,
"channel_name": 16, "totalStaticHi": 16, "samp_rate": 32,
"data_form": 16,
"m_secs": 16, "trigyear": 16, "trigday": 16, "trighour": 16,
"trigminute": 16, "trigsecond": 16, "trigmills": 16, "scale_fac": 32,
"inst_no": 16, "unassigned": 16, "num_samps": 32, "max": 32,
"min": 32, "start_usec": 32, "shot_size": 16, "shot_year": 16,
"shot_doy": 16,
"shot_hour": 16, "shot_minute": 16, "shot_second": 16, "shot_us": 32,
"si_override": 32, "sensor_azimuth": 16, "sensor_inclination": 16,
"lmo_ms": 32, "lmo_flag": 16, "inst_type": 16, "correction": 16,
"azimuth": 16, "sensor_type": 16, "sensor_sn": 16, "das_sn": 16,
"empty1": 16,
"samples": 32, "empty2": 32, "clock_drift": 16, "empty3": 16,
"waterDelay": 32, "startMute": 32, "endMute": 32, "sampleInt": 32,
"waterBottomTime": 32, "endOfRp": 32, "dummy1": 32, "dummy2": 32,
"dummy3": 32, "dummy4": 32, "dummy5": 32, "dummy6": 32, "dummy7": 32,
"dummy8": 32, "dummy9": 32, "Xcoor": 32, "Ycoor": 32, "Inn": 32,
"Cnn": 32, "Spn": 32, "Scal": 16, "Tvmu": 16, "Tucmant": 32,
"Tucexp": 16,
"Tdu": 16, "Dti": 16, "Tscaler": 16, "Sto": 16, "Sed": 48,
"Smsmant": 32, "Smsexp": 16, "Smu": 16, "num_samps": 32,
"samp_rate": 32, "Revision": 16,
"ShotID": 32, "AuxChanSig": 8, "AuxChanID": 8, "SPL": 32, "SPS": 32,
"unass01": 16, "unass02": 16, "SenInt": 8, "VectSens": 8,
"HorAz": 16,
"VertAngle": 16,
"SourceType": 8, "SensorType": 8, "AuxChanSetType": 8,
"NoiseEditType": 8, "NoiseEditGate": 16, "SystemDevice": 8, "FSU": 3,
"DevChan": 8, "SourceCoCo": 8,
"DevStatusBits": 8, "BITTest": 8, "SweepPhaseRot": 16, "unass03": 8,
"BoxFun": 8, "SourceEffortM": 32, "SourceEffortE": 16,
"SourceUnits": 16,
"EventType": 8, "SensorTypeID": 8, "SensorSerial": 3,
"SensorVersion": 8, "SensorRev": 8, "VOR": 8, }
def get_args():
global FH, TYPE, PRINT, L, T, F, ENDIAN, EBCDIC
FH = None
TYPE = None
PRINT = False
L = None
T = None
F = None
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.usage = "Version: {0} Usage: dumpsgy [options]".format(
PROG_VERSION)
parser.add_argument("-f", action="store", dest="infile", type=str,
required=True)
parser.add_argument("-t", action="store", dest="ttype",
choices=['U', 'P', 'S', 'N', 'I'],
help=("Extended trace header style. U => USGS Menlo, "
"P => PASSCAL, S => SEG, I => SIOSEIS, "
"N => iNova FireFly"), default='S')
parser.add_argument("-p", action="store_true",
dest="print_true", default=False)
parser.add_argument("-L", action="store",
dest="bytes_per_trace", type=int)
parser.add_argument("-T", action="store",
dest="traces_per_ensemble", type=int)
parser.add_argument("-F", action="store", dest="trace_format", type=int,
help=("1 = IBM - 4 bytes, 2 = INT - 4 bytes, "
"3 = INT - 2 bytes, 5 = IEEE - 4 bytes, "
"8 = INT - 1 byte"))
parser.add_argument("-e", action="store", dest="endian",
type=str, default='big',
help="Endianess: 'big' or 'little'. Default = 'big'")
parser.add_argument("-i", action="store_false", dest="ebcdic",
default=True, help="EBCDIC textural header.")
args = parser.parse_args()
FH = open(args.infile, 'rb')
TYPE = args.ttype
PRINT = args.print_true
L = args.bytes_per_trace
T = args.traces_per_ensemble
F = args.trace_format
ENDIAN = args.endian
EBCDIC = args.ebcdic
def read_text_header():
buf = FH.read(3200)
t = segy_h.Text()
return t.parse(buf)
def last_extended_header(container):
''' Return True if this contains an EndText stanza? '''
import re
lastRE = re.compile(r".*\(\(.*SEG\:.*[Ee][Nn][Dd][Tt][Ee][Xx][Tt].*\)\).*")
keys = segy_h.Text().__keys__
for k in keys:
what = "container.{0}".format(k)
if EBCDIC:
t = ebcdic.EbcdicToAscii(eval(what))
else:
t = eval(what)
if lastRE.match(t):
return True
return False
def print_text_header(container):
global TYPE
keys = segy_h.Text().__keys__
print "--------------- Textural Header ---------------"
for k in keys:
what = "container.{0}".format(k)
if EBCDIC:
print "{0}\t-\t{1:s}".format(k, ebcdic.EbcdicToAscii(eval(what)))
else:
print "{0}\t-\t{1:s}".format(k, eval(what))
if TYPE is None:
if k == '_38_':
try:
if EBCDIC:
s = ebcdic.EbcdicToAscii(eval(what))
else:
s = eval(what)
try:
flds = s.split()
if flds[1] == 'MENLO':
TYPE = 'U'
elif flds[1] == 'PASSCAL':
TYPE = 'P'
elif flds[1] == 'SEG':
TYPE = 'S'
elif flds[1] == 'SIOSEIS':
TYPE = 'I'
else:
TYPE = 'S'
except BaseException:
pass
except BaseException:
TYPE = 'P'
def read_binary_header():
buf = FH.read(400)
b = segy_h.Reel(ENDIAN)
ret = None
try:
ret = b.parse(buf)
except Exception as e:
LOGGER.error(e)
return ret
def print_binary_header(container):
if not container:
return
keys = segy_h.Reel().__keys__
print "---------- Binary Header ----------"
for k in keys:
what = "container.{0}".format(k)
print "{0:<20}\t---\t{1}".format(k, eval(what))
def read_trace_header():
buf = FH.read(180)
t = segy_h.Trace(ENDIAN)
return t.parse(buf)
def print_trace_header(container):
keys = segy_h.Trace().__keys__
tt = 0
print "---------- Trace Header ----------"
for k in keys:
what = "container.{0}".format(k)
try:
if tt == 9999:
raise
s = SIZEOF[k] / 8
foffset = "{0:<3} - {1:>3}".format(tt, tt + s - 1)
tt += s
except BaseException:
tt = 9999
foffset = "{0:<3} - {1:>3}".format('_', '_')
print "{2} {0:<20}\t---\t{1}".format(k, eval(what), foffset)
def read_extended_header():
buf = FH.read(60)
if TYPE == 'U':
e = segy_h.Menlo(ENDIAN)
elif TYPE == 'S':
e = segy_h.Seg(ENDIAN)
elif TYPE == 'P':
e = segy_h.Passcal(ENDIAN)
elif TYPE == 'I':
e = segy_h.Sioseis(ENDIAN)
elif TYPE == 'N':
e = segy_h.iNova(ENDIAN)
else:
return None
return e.parse(buf)
def print_extended_header(container):
if TYPE == 'U':
keys = segy_h.Menlo().__keys__
elif TYPE == 'S':
keys = segy_h.Seg().__keys__
elif TYPE == 'P':
keys = segy_h.Passcal().__keys__
elif TYPE == 'I':
keys = segy_h.Sioseis().__keys__
elif TYPE == 'N':
keys = segy_h.iNova().__keys__
else:
return None
tt = 180
print "---------- Extended Header ----------"
for k in keys:
what = "container.{0}".format(k)
try:
if tt == 9999:
raise
s = SIZEOF[k] / 8
if s < 1:
raise
foffset = "{0:<3} - {1:>3}".format(tt, tt + s - 1)
tt += s
except BaseException:
tt = 9999
foffset = "{0:<3} - {1:>3}".format('_', '_')
print "{2} {0:<20}\t---\t{1}".format(k, eval(what), foffset)
def read_trace(n, l, f=5):
ret = []
if PRINT is True:
for i in range(n):
buf = FH.read(l)
if f == 1:
ret.append(construct.BFloat32(
"x").parse(ibmfloat.ibm2ieee32(buf)))
elif f == 2:
if ENDIAN == 'little':
b = construct.SLInt32("x").parse(buf)
else:
b = construct.SBInt32("x").parse(buf)
ret.append(b)
elif f == 3:
if ENDIAN == 'little':
b = construct.SLInt16("x").parse(buf)
else:
b = construct.SBInt16("x").parse(buf)
ret.append(b)
elif f == 5:
if ENDIAN == 'little':
b = construct.LFloat32("x").parse(buf)
else:
b = construct.BFloat32("x").parse(buf)
ret.append(b)
elif f == 8:
ret.append(construct.SBInt8("x").parse(buf))
else:
FH.read(n * l)
return ret
def isEOF():
try:
n = FH.read(240)
if n != 240:
raise EOFError
FH.seek(-240, os.SEEK_CUR)
return False
except EOFError:
return True
def main():
global L, F, T
get_args()
text_container = read_text_header()
print_text_header(text_container)
binary_container = read_binary_header()
print_binary_header(binary_container)
if binary_container:
nt = binary_container.extxt
n = binary_container.hns
if F is None:
F = binary_container.format
try:
ll = SAMPLE_LENGTH[binary_container.format]
except KeyError:
ll = 4
if L is None:
L = ll * n
else:
n = int(L) / ll
if T is None:
T = binary_container.ntrpr
else:
T = 1
n = ll = F = 0
if nt > 0:
for x in range(nt):
text_container = read_text_header()
print_text_header(text_container)
elif nt == -1:
while True:
text_container = read_text_header()
print_text_header(text_container)
if last_extended_header(text_container):
break
while True:
for t in range(T):
trace_container = read_trace_header()
extended_header = read_extended_header()
print_trace_header(trace_container)
print_extended_header(extended_header)
trace = read_trace(n, ll, F)
if trace:
print '------------------------'
for t in trace:
print t
if isEOF():
break
if __name__ == "__main__":
main()
| false | true |
f726c16c09d5ab1fb493fbccc82d1e44044c2174 | 11,918 | py | Python | flan/export.py | bretlowery/flan | b79319044fcdb2230ac090232e9056719cb09f17 | [
"MIT"
] | 3 | 2019-08-03T13:27:31.000Z | 2021-06-08T16:25:31.000Z | flan/export.py | bretlowery/flan | b79319044fcdb2230ac090232e9056719cb09f17 | [
"MIT"
] | 2 | 2020-09-24T10:44:55.000Z | 2021-06-25T15:31:24.000Z | flan/export.py | bretlowery/flan | b79319044fcdb2230ac090232e9056719cb09f17 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2012 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This software exports a splunk index using the streaming export endpoint
using a parameterized chunking mechanism.
"""
# installation support files
from __future__ import absolute_import
from __future__ import print_function
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
import time
from os import path
# splunk support files
from splunklib.binding import connect
try:
from utils import parse
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
# hidden file
OUTPUT_FILE = "./export.out"
OUTPUT_MODE = "xml"
OUTPUT_MODES = ["csv", "xml", "json"]
CLIRULES = {
'end': {
'flags': ["--endtime"],
'default': "",
'help': "Start time of export (default is start of index)"
},
'index': {
'flags': ["--index"],
'default': "*",
'help': "Index to export (default is all user defined indices)"
},
'omode': {
'flags': ["--omode"],
'default': OUTPUT_MODE,
'help': "output format %s default is %s" % (OUTPUT_MODES, OUTPUT_MODE)
},
'output': {
'flags': ["--output"],
'default': OUTPUT_FILE,
'help': "Output file name (default is %s)" % OUTPUT_FILE
},
'recover': {
'flags': ["--recover"],
'default': False,
'help': "Export attempts to recover from end of existing export"
},
'search': {
'flags': ["--search"],
'default': "search *",
'help': "search string (default 'search *')"
},
'start': {
'flags': ["--starttime"],
'default': "",
'help': "Start time of export (default is start of index)"
}
}
def get_csv_next_event_start(location, event_buffer):
    """Find the next well-formed CSV event after *location*.

    A candidate event starts at a newline and ends at a '"\\n' sequence.
    It is accepted when field 0 parses as an integer offset and field 1
    is a quoted "seconds.milliseconds" timestamp.  Returns (start, end)
    offsets into *event_buffer*, or (-1, -1) when none is found.
    """
    start = -1
    end = -1
    event_start = event_buffer.find("\n", location + 1)
    event_end = event_buffer.find('"\n', event_start + 1)
    while (event_end > 0):
        parts = event_buffer[event_start:event_end].split(",")
        # Test parts 0 and 1 of the CSV row.  Format should be
        # offset,"time.qqq",...; anything else is not an event row,
        # so keep moving.
        try:
            int(parts[0].replace('\n', ""))
            timestamp = parts[1].replace('"', "")
            timeparts = timestamp.split('.')
            int(timeparts[0])
            int(timeparts[1])
            return (event_start, event_end)
        except (ValueError, IndexError):
            # Malformed candidate (short row or non-numeric fields):
            # advance to the next line and retry.  The original bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit.
            event_start = event_buffer.find("\n", event_end + 2)
            event_end = event_buffer.find('"\n', event_start + 1)
    return (start, end)
def get_csv_event_start(event_buffer):
    """Find the first CSV event whose timestamp differs from its predecessor.

    Scans *event_buffer* event by event and returns (start, end,
    last_time) for the first event whose time field (field 1) differs
    from the preceding event's, where last_time is the predecessor's
    timestamp.  Returns (-1, -1, "") when no such boundary exists.
    """
    (start, end) = get_csv_next_event_start(0, event_buffer)
    if start < 0:
        return (-1, -1, "")
    # The timestamp lies between the first and second commas of the row.
    tstart = event_buffer.find(",", start)
    tend = event_buffer.find(",", tstart + 1)
    last_time = event_buffer[tstart + 1:tend].replace('"', "")
    # Walk forward until the timestamp changes.  (Two stray debug
    # print() calls that echoed raw buffer fragments were removed.)
    while end > 0:
        (start, end) = get_csv_next_event_start(start, event_buffer)
        if end < 0:
            return (-1, -1, "")
        tstart = event_buffer.find(",", start)
        tend = event_buffer.find(",", tstart + 1)
        this_time = event_buffer[tstart + 1:tend].replace('"', "")
        if this_time != last_time:
            return (start, end + 1, last_time)
    return (-1, -1, "")
def get_xml_event_start(event_buffer):
    """Find the first XML <result> whose _time differs from its predecessor.

    Scans *event_buffer* result by result and returns (event_start,
    event_end, last_time) for the first event whose _time text differs
    from the preceding event's, where last_time is the predecessor's
    timestamp.  Returns (-1, -1, "") when no complete event with a
    differing time is present.
    """
    result_pattern = "<result offset='"
    event_end_pattern = "</result>"

    def event_time(pos):
        # Extract the _time text of the <result> beginning at pos;
        # "" when the field cannot be located.
        time_key_pattern = "<field k='_time'>"
        time_start_pattern = "<value><text>"
        key_start = event_buffer.find(time_key_pattern, pos)
        if key_start < 0:
            return ""
        time_start = event_buffer.find(time_start_pattern, key_start)
        if time_start < 0:
            return ""
        time_start += len(time_start_pattern)
        time_end = event_buffer.find("<", time_start)
        return event_buffer[time_start:time_end]

    event_start = event_buffer.find(result_pattern)
    if event_start < 0:
        return (-1, -1, "")
    tail = event_buffer.find(event_end_pattern, event_start)
    if tail < 0:
        # (Previously find() was tested only after adding len(pattern),
        # which can never be negative, so truncation went undetected.)
        return (-1, -1, "")
    event_end = tail + len(event_end_pattern)
    last_time = event_time(event_start)
    # walk through events until the timestamp changes
    event_start = event_end
    while True:
        event_start = event_buffer.find(result_pattern, event_start + 1)
        if event_start < 0:
            return (-1, -1, "")
        tail = event_buffer.find(event_end_pattern, event_start)
        if tail < 0:
            return (-1, -1, "")
        event_end = tail + len(event_end_pattern)
        # The original loop dropped the `+ len(pattern)` offset when
        # locating the time text, so this_time always compared as ""
        # and the scan stopped at the second event unconditionally.
        this_time = event_time(event_start)
        if this_time != last_time:
            return (event_start, event_end, last_time)
        event_start = event_end
def get_json_event_start(event_buffer):
    """ get the event start of an event that is different (in time) from the
        adjoining event, in JSON format """
    event_start_pattern = '{"_cd":"'
    time_key_pattern = '"_time":"'
    time_end_pattern = '"'
    event_end_pattern = '"},\n'
    event_end_pattern2 = '"}[]'  # old json output format bug
    # NOTE(review): every `find(...) + len(pattern)` below turns a -1
    # (not found) into len(pattern)-1, which is positive, so none of the
    # `event_end < 0` fallback checks can ever fire -- confirm against a
    # truncated buffer before relying on this recovery path.
    event_start = event_buffer.find(event_start_pattern)
    event_end = event_buffer.find(event_end_pattern, event_start) + \
        len(event_end_pattern)
    if event_end < 0:
        event_end = event_buffer.find(event_end_pattern2, event_start) + \
            len(event_end_pattern2)
        if (event_end < 0):
            return (-1, -1, "")
    time_start = event_buffer.find(time_key_pattern, event_start) + \
        len(time_key_pattern)
    time_end = event_buffer.find(time_end_pattern, time_start + 1)
    last_time = event_buffer[time_start:time_end]
    # Walk event by event until the timestamp changes.
    event_start = event_end
    while event_end > 0:
        event_start = event_buffer.find(event_start_pattern, event_start + 1)
        event_end = event_buffer.find(event_end_pattern, event_start) + \
            len(event_end_pattern)
        if event_end < 0:
            event_end = event_buffer.find(event_end_pattern2, event_start) + \
                len(event_end_pattern2)
            if (event_end < 0):
                return (-1, -1, "")
        time_start = event_buffer.find(time_key_pattern, event_start) + \
            len(time_key_pattern)
        time_end = event_buffer.find(time_end_pattern, time_start + 1)
        this_time = event_buffer[time_start:time_end]
        if this_time != last_time:
            # Back up 2 to include the event's leading '{"' delimiter.
            return (event_start - 2, event_end, last_time)
        event_start = event_end
    return (-1, -1, "")
def get_event_start(event_buffer, event_format):
    """Dispatch to the format-specific boundary scanner.

    'csv' and 'xml' select their dedicated scanners; any other format
    value falls through to the JSON scanner, matching the tool's three
    output modes.
    """
    handlers = {
        "csv": get_csv_event_start,
        "xml": get_xml_event_start,
    }
    scan = handlers.get(event_format, get_json_event_start)
    return scan(event_buffer)
def recover(options):
    """ recover from an existing export run. We do this by
        finding the last time change between events, truncate the file
        and restart from there """
    event_format = options.kwargs['omode']
    buffer_size = 64 * 1024
    fpd = open(options.kwargs['output'], "r+")
    fpd.seek(0, 2)  # seek to end
    fptr = max(fpd.tell() - buffer_size, 0)
    fptr_eof = 0
    # Scan backwards one 64 KiB chunk at a time for a time boundary.
    # NOTE(review): boundaries that straddle a chunk edge are presumably
    # missed since chunks do not overlap -- confirm acceptable.
    while (fptr > 0):
        fpd.seek(fptr)
        event_buffer = fpd.read(buffer_size)
        (event_start, next_event_start, last_time) = \
            get_event_start(event_buffer, event_format)
        if (event_start != -1):
            fptr_eof = event_start + fptr
            break
        fptr = fptr - buffer_size
    if fptr < 0:
        # didn't find a valid event, so start over
        # NOTE(review): if the file is smaller than buffer_size, fptr
        # starts at 0, neither branch runs, and last_time is unbound at
        # the return below -- verify with a short export file.
        fptr_eof = 0
        last_time = 0
    # truncate file here
    fpd.truncate(fptr_eof)
    fpd.seek(fptr_eof)
    fpd.write("\n")
    fpd.close()
    return last_time
def cleanup_tail(options):
    """Write the closing delimiter for a recovered export file.

    After recover() truncates mid-stream output, the file needs a
    terminator matching its format: a bare newline for CSV, a closing
    </results> tag for XML, and a closing bracket for JSON (the
    fallback for any other mode).
    """
    terminators = {
        "csv": "\n",
        "xml": "\n</results>\n",
    }
    tail = terminators.get(options.kwargs['omode'], "\n]\n")
    options.kwargs['fd'].write(tail)
def export(options, service):
    """ main export method: export any number of indexes """
    start = options.kwargs['start']
    end = options.kwargs['end']
    fixtail = options.kwargs['fixtail']
    once = True
    # Build the export search string from the base search plus index and
    # optional time-window qualifiers.
    squery = options.kwargs['search']
    squery = squery + " index=%s" % options.kwargs['index']
    if (start != ""):
        squery = squery + " earliest_time=%s" % start
    if (end != ""):
        squery = squery + " latest_time=%s" % end
    success = False
    # Retry the export request every 60 seconds until splunkd accepts it.
    while not success:
        # issue query to splunkd
        # count=0 overrides the maximum number of events
        # returned (normally 50K) regardless of what the .conf
        # file for splunkd says.
        result = service.get('search/jobs/export',
                             search=squery,
                             output_mode=options.kwargs['omode'],
                             timeout=60,
                             earliest_time="0.000",
                             time_format="%s.%Q",
                             count=0)
        if result.status != 200:
            print("warning: export job failed: %d, sleep/retry" % result.status)
            time.sleep(60)
        else:
            success = True
    # write export file
    while True:
        if fixtail and once:
            # First write after a recovery: close the truncated tail so
            # the appended output stays well-formed.
            cleanup_tail(options)
            once = False
        content = result.body.read()
        if len(content) == 0:
            break
        options.kwargs['fd'].write(content)
        options.kwargs['fd'].write("\n")
        options.kwargs['fd'].flush()
def main():
    """ main entry """
    options = parse(sys.argv[1:], CLIRULES, ".splunkrc")
    if options.kwargs['omode'] not in OUTPUT_MODES:
        print("output mode must be one of %s, found %s" % (OUTPUT_MODES,
              options.kwargs['omode']))
        sys.exit(1)
    service = connect(**options.kwargs)
    # Decide between a fresh export and appending after recovery.
    if path.exists(options.kwargs['output']):
        if not options.kwargs['recover']:
            print("Export file %s exists, and recover option nor specified" % \
                  options.kwargs['output'])
            sys.exit(1)
        else:
            # Resume: truncate at the last time boundary and export from
            # the timestamp recover() returns.
            options.kwargs['end'] = recover(options)
            options.kwargs['fixtail'] = True
            openmode = "a"
    else:
        openmode = "w"
        options.kwargs['fixtail'] = False
    try:
        options.kwargs['fd'] = open(options.kwargs['output'], openmode)
    except IOError:
        print("Failed to open output file %s w/ mode %s" % \
              (options.kwargs['output'], openmode))
        sys.exit(1)
    export(options, service)
main() | 32.38587 | 84 | 0.601275 |
from __future__ import absolute_import
from __future__ import print_function
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
import time
from os import path
from splunklib.binding import connect
try:
from utils import parse
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
OUTPUT_FILE = "./export.out"
OUTPUT_MODE = "xml"
OUTPUT_MODES = ["csv", "xml", "json"]
CLIRULES = {
'end': {
'flags': ["--endtime"],
'default': "",
'help': "Start time of export (default is start of index)"
},
'index': {
'flags': ["--index"],
'default': "*",
'help': "Index to export (default is all user defined indices)"
},
'omode': {
'flags': ["--omode"],
'default': OUTPUT_MODE,
'help': "output format %s default is %s" % (OUTPUT_MODES, OUTPUT_MODE)
},
'output': {
'flags': ["--output"],
'default': OUTPUT_FILE,
'help': "Output file name (default is %s)" % OUTPUT_FILE
},
'recover': {
'flags': ["--recover"],
'default': False,
'help': "Export attempts to recover from end of existing export"
},
'search': {
'flags': ["--search"],
'default': "search *",
'help': "search string (default 'search *')"
},
'start': {
'flags': ["--starttime"],
'default': "",
'help': "Start time of export (default is start of index)"
}
}
def get_csv_next_event_start(location, event_buffer):
start = -1
end = -1
event_start = event_buffer.find("\n", location + 1)
event_end = event_buffer.find('"\n', event_start + 1)
while (event_end > 0):
parts = event_buffer[event_start:event_end].split(",")
# test parts 0 and 1 of CSV. Format should be time.qqq, anything
# else is not time stamp to keep moving.
try:
int(parts[0].replace('\n', ""))
timestamp = parts[1].replace('"', "")
timeparts = timestamp.split('.')
int(timeparts[0])
int(timeparts[1])
return (event_start, event_end)
except:
event_start = event_buffer.find("\n", event_end + 2)
event_end = event_buffer.find('"\n', event_start + 1)
return (start, end)
def get_csv_event_start(event_buffer):
(start, end) = get_csv_next_event_start(0, event_buffer)
if start < 0:
return (-1, -1, "")
print(event_buffer[start:end])
tstart = event_buffer.find(",", start)
tend = event_buffer.find(",", tstart + 1)
print(event_buffer[tstart:tend])
last_time = event_buffer[tstart + 1:tend].replace('"', "")
while end > 0:
(start, end) = get_csv_next_event_start(start, event_buffer)
if end < 0:
return (-1, -1, "")
tstart = event_buffer.find(",", start)
tend = event_buffer.find(",", tstart + 1)
this_time = event_buffer[tstart + 1:tend].replace('"', "")
if this_time != last_time:
return (start, end + 1, last_time)
return (-1, -1, "")
def get_xml_event_start(event_buffer):
result_pattern = "<result offset='"
time_key_pattern = "<field k='_time'>"
time_start_pattern = "<value><text>"
time_end_pattern = "<"
event_end_pattern = "</result>"
event_start = event_buffer.find(result_pattern)
event_end = event_buffer.find(event_end_pattern, event_start) + \
len(event_end_pattern)
if event_end < 0:
return (-1, -1, "")
time_key_start = event_buffer.find(time_key_pattern, event_start)
time_start = event_buffer.find(time_start_pattern, time_key_start) + \
len(time_start_pattern)
time_end = event_buffer.find(time_end_pattern, time_start + 1)
last_time = event_buffer[time_start:time_end]
# wallk through events until time changes
event_start = event_end
while event_end > 0:
event_start = event_buffer.find(result_pattern, event_start + 1)
event_end = event_buffer.find(event_end_pattern, event_start) + \
len(event_end_pattern)
if event_end < 0:
return (-1, -1, "")
time_key_start = event_buffer.find(time_key_pattern, event_start)
time_start = event_buffer.find(time_start_pattern, time_key_start)
time_end = event_buffer.find(time_end_pattern, time_start)
this_time = event_buffer[time_start:time_end]
if this_time != last_time:
return (event_start, event_end, last_time)
event_start = event_end
return (-1, -1, "")
def get_json_event_start(event_buffer):
event_start_pattern = '{"_cd":"'
time_key_pattern = '"_time":"'
time_end_pattern = '"'
event_end_pattern = '"},\n'
event_end_pattern2 = '"}[]' # old json output format bug
event_start = event_buffer.find(event_start_pattern)
event_end = event_buffer.find(event_end_pattern, event_start) + \
len(event_end_pattern)
if event_end < 0:
event_end = event_buffer.find(event_end_pattern2, event_start) + \
len(event_end_pattern2)
if (event_end < 0):
return (-1, -1, "")
time_start = event_buffer.find(time_key_pattern, event_start) + \
len(time_key_pattern)
time_end = event_buffer.find(time_end_pattern, time_start + 1)
last_time = event_buffer[time_start:time_end]
event_start = event_end
while event_end > 0:
event_start = event_buffer.find(event_start_pattern, event_start + 1)
event_end = event_buffer.find(event_end_pattern, event_start) + \
len(event_end_pattern)
if event_end < 0:
event_end = event_buffer.find(event_end_pattern2, event_start) + \
len(event_end_pattern2)
if (event_end < 0):
return (-1, -1, "")
time_start = event_buffer.find(time_key_pattern, event_start) + \
len(time_key_pattern)
time_end = event_buffer.find(time_end_pattern, time_start + 1)
this_time = event_buffer[time_start:time_end]
if this_time != last_time:
return (event_start - 2, event_end, last_time)
event_start = event_end
return (-1, -1, "")
def get_event_start(event_buffer, event_format):
if event_format == "csv":
return get_csv_event_start(event_buffer)
elif event_format == "xml":
return get_xml_event_start(event_buffer)
else:
return get_json_event_start(event_buffer)
def recover(options):
event_format = options.kwargs['omode']
buffer_size = 64 * 1024
fpd = open(options.kwargs['output'], "r+")
fpd.seek(0, 2) # seek to end
fptr = max(fpd.tell() - buffer_size, 0)
fptr_eof = 0
while (fptr > 0):
fpd.seek(fptr)
event_buffer = fpd.read(buffer_size)
(event_start, next_event_start, last_time) = \
get_event_start(event_buffer, event_format)
if (event_start != -1):
fptr_eof = event_start + fptr
break
fptr = fptr - buffer_size
if fptr < 0:
# didn't find a valid event, so start over
fptr_eof = 0
last_time = 0
fpd.truncate(fptr_eof)
fpd.seek(fptr_eof)
fpd.write("\n")
fpd.close()
return last_time
def cleanup_tail(options):
if options.kwargs['omode'] == "csv":
options.kwargs['fd'].write("\n")
elif options.kwargs['omode'] == "xml":
options.kwargs['fd'].write("\n</results>\n")
else:
options.kwargs['fd'].write("\n]\n")
def export(options, service):
start = options.kwargs['start']
end = options.kwargs['end']
fixtail = options.kwargs['fixtail']
once = True
squery = options.kwargs['search']
squery = squery + " index=%s" % options.kwargs['index']
if (start != ""):
squery = squery + " earliest_time=%s" % start
if (end != ""):
squery = squery + " latest_time=%s" % end
success = False
while not success:
result = service.get('search/jobs/export',
search=squery,
output_mode=options.kwargs['omode'],
timeout=60,
earliest_time="0.000",
time_format="%s.%Q",
count=0)
if result.status != 200:
print("warning: export job failed: %d, sleep/retry" % result.status)
time.sleep(60)
else:
success = True
while True:
if fixtail and once:
cleanup_tail(options)
once = False
content = result.body.read()
if len(content) == 0:
break
options.kwargs['fd'].write(content)
options.kwargs['fd'].write("\n")
options.kwargs['fd'].flush()
def main():
options = parse(sys.argv[1:], CLIRULES, ".splunkrc")
if options.kwargs['omode'] not in OUTPUT_MODES:
print("output mode must be one of %s, found %s" % (OUTPUT_MODES,
options.kwargs['omode']))
sys.exit(1)
service = connect(**options.kwargs)
if path.exists(options.kwargs['output']):
if not options.kwargs['recover']:
print("Export file %s exists, and recover option nor specified" % \
options.kwargs['output'])
sys.exit(1)
else:
options.kwargs['end'] = recover(options)
options.kwargs['fixtail'] = True
openmode = "a"
else:
openmode = "w"
options.kwargs['fixtail'] = False
try:
options.kwargs['fd'] = open(options.kwargs['output'], openmode)
except IOError:
print("Failed to open output file %s w/ mode %s" % \
(options.kwargs['output'], openmode))
sys.exit(1)
export(options, service)
if __name__ == '__main__':
main() | true | true |
f726c1895f87278ef2674e56273fb6b067545c0c | 392 | py | Python | python/python_backup/PRAC_PYTHON/dc.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | python/python_backup/PRAC_PYTHON/dc.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | python/python_backup/PRAC_PYTHON/dc.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z | class palindrome:
def __init__(self):
self.a=""
def input(self,k1):
self.a=k1
def calculate(self):
f=0
j=len(k1)-1
while i<len(k1)/2:
if k1[i]!=k1[j]:
f=1
else:
i=i+1
j=j-1
if f==0:
print "self.a is palindrome"
else:
print "self.a is not a palindrome"
x=palindrome()
a=input("enter string:")
x.input(a)
x.calculate() | 17.818182 | 38 | 0.545918 | class palindrome:
def __init__(self):
self.a=""
def input(self,k1):
self.a=k1
def calculate(self):
f=0
j=len(k1)-1
while i<len(k1)/2:
if k1[i]!=k1[j]:
f=1
else:
i=i+1
j=j-1
if f==0:
print "self.a is palindrome"
else:
print "self.a is not a palindrome"
x=palindrome()
a=input("enter string:")
x.input(a)
x.calculate() | false | true |
f726c1f060b031498baf48c9527e53700f69bbf2 | 6,961 | py | Python | virt/ansible-latest/lib/python2.7/site-packages/ansible/modules/network/aos/_aos_device.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | 1 | 2020-03-29T18:41:01.000Z | 2020-03-29T18:41:01.000Z | ansible/ansible/modules/network/aos/_aos_device.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 7 | 2020-09-07T17:27:56.000Z | 2022-03-02T06:25:46.000Z | ansible/ansible/modules/network/aos/_aos_device.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 1 | 2020-03-22T01:04:48.000Z | 2020-03-22T01:04:48.000Z | #!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_device
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage Devices on AOS Server
deprecated:
removed_in: "2.9"
why: This module does not support AOS 2.1 or later
alternative: See new modules at U(https://www.ansible.com/ansible-apstra).
description:
- Apstra AOS Device module let you manage your devices in AOS easily. You can
approve devices and define in which state the device should be. Currently
only the state I(normal) is supported but the goal is to extend this module
with additional state. This module is idempotent and support the I(check) mode.
It's using the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- The device serial-number; i.e. uniquely identifies the device in the
AOS system. Only one of I(name) or I(id) can be set.
id:
description:
- The AOS internal id for a device; i.e. uniquely identifies the device in the
AOS system. Only one of I(name) or I(id) can be set.
state:
description:
- Define in which state the device should be. Currently only I(normal)
is supported but the goal is to add I(maint) and I(decomm).
default: normal
choices: ['normal']
approve:
description:
- The approve argument instruct the module to convert a device in quarantine
mode into approved mode.
default: "no"
type: bool
location:
description:
- When approving a device using the I(approve) argument, it's possible
define the location of the device.
'''
EXAMPLES = '''
- name: Approve a new device
aos_device:
session: "{{ aos_session }}"
name: D2060B2F105429GDABCD123
state: 'normal'
approve: true
location: "rack-45, ru-18"
'''
RETURNS = '''
name:
description: Name of the Device, usually the serial-number.
returned: always
type: str
sample: Server-IpAddrs
id:
description: AOS unique ID assigned to the Device
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import HAS_AOS_PYEZ, get_aos_session, check_aos_version, find_collection_item
if HAS_AOS_PYEZ:
from apstra.aosom.exc import SessionError, SessionRqstError
def aos_device_normal(module, aos, dev):
margs = module.params
# If approve is define, check if the device needs to be approved or not
if margs['approve'] is not None:
if dev.is_approved:
module.exit_json(changed=False,
name=dev.name,
id=dev.id,
value=dev.value)
if not module.check_mode:
try:
dev.approve(location=margs['location'])
except (SessionError, SessionRqstError):
module.fail_json(msg="Unable to approve device")\
module.exit_json(changed=True,
name=dev.name,
id=dev.id,
value=dev.value)
else:
# Check if the device is online
if dev.state in ('OOS-READY', 'IS-READY'):
module.exit_json(changed=False,
name=dev.name,
id=dev.id,
value=dev.value)
else:
module.fail_json(msg="Device is in '%s' state" % dev.state)
def aos_device(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except Exception:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['id'] is not None:
item_id = margs['id']
elif margs['name'] is not None:
item_name = margs['name']
# ----------------------------------------------------
# Find Object if available based on ID or Name
# ----------------------------------------------------
dev = find_collection_item(aos.Devices,
item_name=item_name,
item_id=item_id)
if dev.exists is False:
module.fail_json(msg="unknown device '%s'" % margs['name'])
# ----------------------------------------------------
# Valid device state for reference
# ----------------------------------------------------
# DEVICE_STATE_IS_ACTIVE = 1;
# DEVICE_STATE_IS_READY = 2;
# DEVICE_STATE_IS_NOCOMMS = 3;
# DEVICE_STATE_IS_MAINT = 4;
# DEVICE_STATE_IS_REBOOTING = 5;
# DEVICE_STATE_OOS_STOCKED = 6;
# DEVICE_STATE_OOS_QUARANTINED = 7;
# DEVICE_STATE_OOS_READY = 8;
# DEVICE_STATE_OOS_NOCOMMS = 9;
# DEVICE_STATE_OOS_DECOMM = 10;
# DEVICE_STATE_OOS_MAINT = 11;
# DEVICE_STATE_OOS_REBOOTING = 12;
# DEVICE_STATE_ERROR = 13;
# ----------------------------------------------------
# State == Normal
# ----------------------------------------------------
if margs['state'] == 'normal':
aos_device_normal(module, aos, dev)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False),
id=dict(required=False),
state=dict(choices=['normal'],
default='normal'),
approve=dict(required=False, type='bool'),
location=dict(required=False, default='')
),
mutually_exclusive=[('name', 'id')],
required_one_of=[('name', 'id')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
aos_device(module)
if __name__ == "__main__":
main()
| 31.215247 | 119 | 0.598046 |
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_device
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage Devices on AOS Server
deprecated:
removed_in: "2.9"
why: This module does not support AOS 2.1 or later
alternative: See new modules at U(https://www.ansible.com/ansible-apstra).
description:
- Apstra AOS Device module let you manage your devices in AOS easily. You can
approve devices and define in which state the device should be. Currently
only the state I(normal) is supported but the goal is to extend this module
with additional state. This module is idempotent and support the I(check) mode.
It's using the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- The device serial-number; i.e. uniquely identifies the device in the
AOS system. Only one of I(name) or I(id) can be set.
id:
description:
- The AOS internal id for a device; i.e. uniquely identifies the device in the
AOS system. Only one of I(name) or I(id) can be set.
state:
description:
- Define in which state the device should be. Currently only I(normal)
is supported but the goal is to add I(maint) and I(decomm).
default: normal
choices: ['normal']
approve:
description:
- The approve argument instruct the module to convert a device in quarantine
mode into approved mode.
default: "no"
type: bool
location:
description:
- When approving a device using the I(approve) argument, it's possible
define the location of the device.
'''
EXAMPLES = '''
- name: Approve a new device
aos_device:
session: "{{ aos_session }}"
name: D2060B2F105429GDABCD123
state: 'normal'
approve: true
location: "rack-45, ru-18"
'''
RETURNS = '''
name:
description: Name of the Device, usually the serial-number.
returned: always
type: str
sample: Server-IpAddrs
id:
description: AOS unique ID assigned to the Device
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import HAS_AOS_PYEZ, get_aos_session, check_aos_version, find_collection_item
if HAS_AOS_PYEZ:
from apstra.aosom.exc import SessionError, SessionRqstError
def aos_device_normal(module, aos, dev):
margs = module.params
if margs['approve'] is not None:
if dev.is_approved:
module.exit_json(changed=False,
name=dev.name,
id=dev.id,
value=dev.value)
if not module.check_mode:
try:
dev.approve(location=margs['location'])
except (SessionError, SessionRqstError):
module.fail_json(msg="Unable to approve device")\
module.exit_json(changed=True,
name=dev.name,
id=dev.id,
value=dev.value)
else:
if dev.state in ('OOS-READY', 'IS-READY'):
module.exit_json(changed=False,
name=dev.name,
id=dev.id,
value=dev.value)
else:
module.fail_json(msg="Device is in '%s' state" % dev.state)
def aos_device(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except Exception:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['id'] is not None:
item_id = margs['id']
elif margs['name'] is not None:
item_name = margs['name']
dev = find_collection_item(aos.Devices,
item_name=item_name,
item_id=item_id)
if dev.exists is False:
module.fail_json(msg="unknown device '%s'" % margs['name'])
if margs['state'] == 'normal':
aos_device_normal(module, aos, dev)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False),
id=dict(required=False),
state=dict(choices=['normal'],
default='normal'),
approve=dict(required=False, type='bool'),
location=dict(required=False, default='')
),
mutually_exclusive=[('name', 'id')],
required_one_of=[('name', 'id')],
supports_check_mode=True
)
check_aos_version(module, '0.6.0')
aos_device(module)
if __name__ == "__main__":
main()
| true | true |
f726c272489e3dc1390d84faaafa96fc0f0468af | 6,064 | py | Python | py/featureExtractor.py | Anthony2018/Speech-Enhancement | 9cd0ba6456b946152c17bbccf7c7adaf251a7598 | [
"MIT"
] | null | null | null | py/featureExtractor.py | Anthony2018/Speech-Enhancement | 9cd0ba6456b946152c17bbccf7c7adaf251a7598 | [
"MIT"
] | null | null | null | py/featureExtractor.py | Anthony2018/Speech-Enhancement | 9cd0ba6456b946152c17bbccf7c7adaf251a7598 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import re
import scipy.stats as stats
from scipy.io import wavfile
import numpy as np
import os
raw_folder = './raw'
pattern_date = re.compile('[0-9]{8}')
female_pattern = re.compile('[Ff]emale')
male_pattern = re.compile('[Mm]ale')
american_pattern = re.compile('[Aa]merican')
british_pattern = re.compile('[Bb]ritish')
european_pattern = re.compile('[Ee]uropean')
indian_pattern = re.compile('[Ii]ndian')
australian_pattern = re.compile('[Aa]ustralian')
adult_pattern = re.compile('[Aa]dult')
youth_pattern = re.compile('[Yy]outh')
senior_pattern = re.compile('[Ss]enior')
def get_metadata(readme_file):
#define variables in case startswith does not work:
gender, age_range, pronunciation = 'not specified', 'not specified', 'not specified'
for line in open(readme_file):
if line.startswith("Gender:"):
gender = line.split(":")[1].strip()
elif line.startswith("Age Range:"):
age_range = line.split(":")[1].strip()
elif line.startswith("Pronunciation dialect:"):
pronunciation = line.split(":")[1].strip()
return gender, age_range, pronunciation
def get_features(frequencies):
print "\nExtracting features "
nobs, minmax, mean, variance, skew, kurtosis = stats.describe(frequencies)
median = np.median(frequencies)
mode = stats.mode(frequencies).mode[0]
std = np.std(frequencies)
low,peak = minmax
q75,q25 = np.percentile(frequencies, [75 ,25])
iqr = q75 - q25
return nobs, mean, skew, kurtosis, median, mode, std, low, peak, q25, q75, iqr
def get_date(sample_name):
try:
date = pattern_date.search(sample_name).group()
except AttributeError:
date = '20000000'
return date
def get_user_name(sample_name):
return re.compile("[-_]").split(sample_name)[0]
def homogenize_format(gender, age_range, pronunciation):
#Homogenize gender format
if female_pattern.search(gender): gender = 'Female'
elif male_pattern.search(gender): gender = 'Male'
else: gender = 'not_specified'
#Homogenize pronunciation format to 5/6 categories
if british_pattern.search(pronunciation): pronunciation = 'British'
elif american_pattern.search(pronunciation): pronunciation = 'American'
elif european_pattern.search(pronunciation): pronunciation = 'European'
elif indian_pattern.search(pronunciation): pronunciation = 'Indian'
elif australian_pattern.search(pronunciation): pronunciation = 'Australian'
else: pronunciation = 'Other'
#Homogenize age range format
if adult_pattern.search(age_range): age_range = 'Adult'
elif youth_pattern.search(age_range): age_range = 'Youth'
elif senior_pattern.search(age_range): age_range = 'Senior'
else: age_range = 'Unknown'
return gender, age_range, pronunciation
def get_frequencies(sample_wav_folder):
#extract list of dominant frequencies in sliding windows of duration defined by 'step' for each of the 10 wav files and return an array
frequencies_lol = [] #lol: list of lists
for wav_file in os.listdir(sample_wav_folder):
rate, data = wavfile.read(os.path.join(sample_wav_folder, wav_file))
#get dominating frequencies in sliding windows of 200ms
step = rate/5 #3200 sampling points every 1/5 sec
window_frequencies = []
for i in range(0,len(data),step):
ft = np.fft.fft(data[i:i+step]) #fft returns the list N complex numbers
freqs = np.fft.fftfreq(len(ft)) #fftq tells you the frequencies associated with the coefficients
imax = np.argmax(np.abs(ft))
freq = freqs[imax]
freq_in_hz = abs(freq *rate)
window_frequencies.append(freq_in_hz)
filtered_frequencies = [f for f in window_frequencies if 20<f<280 and not 46<f<66] # I see noise at 50Hz and 60Hz
frequencies_lol.append(filtered_frequencies)
frequencies = [item for sublist in frequencies_lol for item in sublist]
return frequencies
def main():
samples = [d for d in os.listdir(raw_folder) if os.path.isdir(os.path.join(raw_folder, d))]
n_samples = len(samples)
columns=['nobs', 'mean', 'skew', 'kurtosis',
'median', 'mode', 'std', 'low',
'peak', 'q25', 'q75', 'iqr',
'user_name', 'sample_date', 'age_range',
'pronunciation', 'gender' ]
myData = pd.DataFrame(columns=columns)#, index=range(n_samples))
for i in range(n_samples):
sample = sorted(samples)[i]
sample_folder = os.path.join(raw_folder, sample)
sample_wav_folder = os.path.join(sample_folder, 'wav')
readme_file = os.path.join(sample_folder, 'etc', 'README')
date = get_date(sample)
user_name = get_user_name(sample)
if os.path.isfile(readme_file):
gender, age_range, pronunciation = get_metadata(readme_file)
gender, age_range, pronunciation = homogenize_format(gender, age_range, pronunciation)
#Read and extract the information from the wav files:
if os.path.isdir(sample_wav_folder): #some of the samples don't contain a wav folder (Ex: 'LunaTick-20080329-vf1')
frequencies = get_frequencies(sample_wav_folder)
if len(frequencies) > 10:
#for some of the files (ex: Aaron-20130527-giy)
#I only recover frequencies of 0.0 (even if I don't split in chunks) which is not integrated into my lol and frequencies is empty
nobs, mean, skew, kurtosis, median, mode, std, low, peak, q25, q75, iqr = get_features(frequencies)
sample_dict = {'nobs':nobs, 'mean':mean, 'skew':skew, 'kurtosis':kurtosis,
'median':median, 'mode':mode, 'std':std, 'low': low,
'peak':peak, 'q25':q25, 'q75':q75, 'iqr':iqr,
'user_name':user_name, 'sample_date':date,
'age_range':age_range, 'pronunciation':pronunciation,
'gender':gender}
print "\nappending %s sample %s : %s"%(gender, sample, sample_dict)
myData.loc[i] = pd.Series(sample_dict)
myData.to_csv('myData_filtered.csv')
if __name__ == '__main__':
main()
| 32.602151 | 137 | 0.685686 |
import pandas as pd
import re
import scipy.stats as stats
from scipy.io import wavfile
import numpy as np
import os
raw_folder = './raw'
pattern_date = re.compile('[0-9]{8}')
female_pattern = re.compile('[Ff]emale')
male_pattern = re.compile('[Mm]ale')
american_pattern = re.compile('[Aa]merican')
british_pattern = re.compile('[Bb]ritish')
european_pattern = re.compile('[Ee]uropean')
indian_pattern = re.compile('[Ii]ndian')
australian_pattern = re.compile('[Aa]ustralian')
adult_pattern = re.compile('[Aa]dult')
youth_pattern = re.compile('[Yy]outh')
senior_pattern = re.compile('[Ss]enior')
def get_metadata(readme_file):
gender, age_range, pronunciation = 'not specified', 'not specified', 'not specified'
for line in open(readme_file):
if line.startswith("Gender:"):
gender = line.split(":")[1].strip()
elif line.startswith("Age Range:"):
age_range = line.split(":")[1].strip()
elif line.startswith("Pronunciation dialect:"):
pronunciation = line.split(":")[1].strip()
return gender, age_range, pronunciation
def get_features(frequencies):
print "\nExtracting features "
nobs, minmax, mean, variance, skew, kurtosis = stats.describe(frequencies)
median = np.median(frequencies)
mode = stats.mode(frequencies).mode[0]
std = np.std(frequencies)
low,peak = minmax
q75,q25 = np.percentile(frequencies, [75 ,25])
iqr = q75 - q25
return nobs, mean, skew, kurtosis, median, mode, std, low, peak, q25, q75, iqr
def get_date(sample_name):
try:
date = pattern_date.search(sample_name).group()
except AttributeError:
date = '20000000'
return date
def get_user_name(sample_name):
return re.compile("[-_]").split(sample_name)[0]
def homogenize_format(gender, age_range, pronunciation):
if female_pattern.search(gender): gender = 'Female'
elif male_pattern.search(gender): gender = 'Male'
else: gender = 'not_specified'
if british_pattern.search(pronunciation): pronunciation = 'British'
elif american_pattern.search(pronunciation): pronunciation = 'American'
elif european_pattern.search(pronunciation): pronunciation = 'European'
elif indian_pattern.search(pronunciation): pronunciation = 'Indian'
elif australian_pattern.search(pronunciation): pronunciation = 'Australian'
else: pronunciation = 'Other'
if adult_pattern.search(age_range): age_range = 'Adult'
elif youth_pattern.search(age_range): age_range = 'Youth'
elif senior_pattern.search(age_range): age_range = 'Senior'
else: age_range = 'Unknown'
return gender, age_range, pronunciation
def get_frequencies(sample_wav_folder):
frequencies_lol = []
for wav_file in os.listdir(sample_wav_folder):
rate, data = wavfile.read(os.path.join(sample_wav_folder, wav_file))
step = rate/5
window_frequencies = []
for i in range(0,len(data),step):
ft = np.fft.fft(data[i:i+step])
freqs = np.fft.fftfreq(len(ft))
imax = np.argmax(np.abs(ft))
freq = freqs[imax]
freq_in_hz = abs(freq *rate)
window_frequencies.append(freq_in_hz)
filtered_frequencies = [f for f in window_frequencies if 20<f<280 and not 46<f<66]
frequencies_lol.append(filtered_frequencies)
frequencies = [item for sublist in frequencies_lol for item in sublist]
return frequencies
def main():
samples = [d for d in os.listdir(raw_folder) if os.path.isdir(os.path.join(raw_folder, d))]
n_samples = len(samples)
columns=['nobs', 'mean', 'skew', 'kurtosis',
'median', 'mode', 'std', 'low',
'peak', 'q25', 'q75', 'iqr',
'user_name', 'sample_date', 'age_range',
'pronunciation', 'gender' ]
myData = pd.DataFrame(columns=columns)
for i in range(n_samples):
sample = sorted(samples)[i]
sample_folder = os.path.join(raw_folder, sample)
sample_wav_folder = os.path.join(sample_folder, 'wav')
readme_file = os.path.join(sample_folder, 'etc', 'README')
date = get_date(sample)
user_name = get_user_name(sample)
if os.path.isfile(readme_file):
gender, age_range, pronunciation = get_metadata(readme_file)
gender, age_range, pronunciation = homogenize_format(gender, age_range, pronunciation)
if os.path.isdir(sample_wav_folder):
frequencies = get_frequencies(sample_wav_folder)
if len(frequencies) > 10:
#for some of the files (ex: Aaron-20130527-giy)
#I only recover frequencies of 0.0 (even if I don't split in chunks) which is not integrated into my lol and frequencies is empty
nobs, mean, skew, kurtosis, median, mode, std, low, peak, q25, q75, iqr = get_features(frequencies)
sample_dict = {'nobs':nobs, 'mean':mean, 'skew':skew, 'kurtosis':kurtosis,
'median':median, 'mode':mode, 'std':std, 'low': low,
'peak':peak, 'q25':q25, 'q75':q75, 'iqr':iqr,
'user_name':user_name, 'sample_date':date,
'age_range':age_range, 'pronunciation':pronunciation,
'gender':gender}
print "\nappending %s sample %s : %s"%(gender, sample, sample_dict)
myData.loc[i] = pd.Series(sample_dict)
myData.to_csv('myData_filtered.csv')
if __name__ == '__main__':
main()
| false | true |
f726c4efcbe41267a6d7cd4f11809971061e72b5 | 15,902 | py | Python | python/ccxt/async_support/base/exchange.py | halfjuice/ccxt | cc702efbaafba547c3bc973895bd817b3308d072 | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | halfjuice/ccxt | cc702efbaafba547c3bc973895bd817b3308d072 | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | halfjuice/ccxt | cc702efbaafba547c3bc973895bd817b3308d072 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.61.55'
# -----------------------------------------------------------------------------
import asyncio
import concurrent.futures
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttler import Throttler
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
from ccxt.base.errors import BadSymbol
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
synchronous = False
    def __init__(self, config={}):
        """Initialize the async exchange.

        Recognized config keys (beyond the base class): 'asyncio_loop' and
        'session' to share an externally managed event loop / aiohttp session,
        'aiohttp_trust_env', 'verify' (SSL verification), and 'cafile'.
        """
        # The caller may supply an externally managed event loop.
        if 'asyncio_loop' in config:
            self.asyncio_loop = config['asyncio_loop']
        self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
        self.verify = config.get('verify', self.verify)
        # If a session was passed in via config, we must not close it ourselves.
        self.own_session = 'session' not in config
        # Default to the certifi CA bundle unless the caller overrides it.
        self.cafile = config.get('cafile', certifi.where())
        super(Exchange, self).__init__(config)
        self.throttle = None
        self.init_rest_rate_limiter()
        # Shared "in-flight market load" task, see load_markets().
        self.markets_loading = None
        self.reloading_markets = False
def init_rest_rate_limiter(self):
self.throttle = Throttler(self.tokenBucket, self.asyncio_loop)
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
    def open(self):
        """Lazily bind an event loop and create the aiohttp session if needed."""
        if self.asyncio_loop is None:
            if sys.version_info >= (3, 7):
                self.asyncio_loop = asyncio.get_running_loop()
            else:
                self.asyncio_loop = asyncio.get_event_loop()
            # The throttler was constructed before the loop existed; attach it now.
            self.throttle.loop = self.asyncio_loop
        if self.own_session and self.session is None:
            # Create our SSL context object with our CA cert file
            # (if verify is falsy, pass it through unchanged to disable verification).
            context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
            # Pass this SSL context to aiohttp and create a TCPConnector
            connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop, enable_cleanup_closed=True)
            self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
cost = self.calculate_rate_limiter_cost(api, method, path, params, config, context)
# insert cost into here...
await self.throttle(cost)
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
    async def fetch(self, url, method='GET', headers=None, body=None):
        """Perform a HTTP request and return decoded JSON data.

        Falls back to returning the raw text when the body is not JSON, and
        maps network-level aiohttp/socket errors onto ccxt exception types.
        """
        request_headers = self.prepare_request_headers(headers)
        url = self.proxy + url
        if self.verbose:
            self.log("\nRequest:", method, url, headers, body)
        self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
        request_body = body
        encoded_body = body.encode() if body else None
        # Ensure the event loop and session exist before dispatching.
        self.open()
        session_method = getattr(self.session, method.lower())
        http_response = None
        http_status_code = None
        http_status_text = None
        json_response = None
        try:
            # encoded=True keeps the URL exactly as signed (no re-quoting).
            async with session_method(yarl.URL(url, encoded=True),
                                      data=encoded_body,
                                      headers=request_headers,
                                      timeout=(self.timeout / 1000),
                                      proxy=self.aiohttp_proxy) as response:
                http_response = await response.text(errors='replace')
                # CIMultiDictProxy: flatten duplicate header names into one
                # comma-joined value per key.
                raw_headers = response.headers
                headers = {}
                for header in raw_headers:
                    if header in headers:
                        headers[header] = headers[header] + ', ' + raw_headers[header]
                    else:
                        headers[header] = raw_headers[header]
                http_status_code = response.status
                http_status_text = response.reason
                # Hook for subclasses to rewrite/inspect the raw response text.
                http_response = self.on_rest_response(http_status_code, http_status_text, url, method, headers, http_response, request_headers, request_body)
                json_response = self.parse_json(http_response)
                if self.enableLastHttpResponse:
                    self.last_http_response = http_response
                if self.enableLastResponseHeaders:
                    self.last_response_headers = headers
                if self.enableLastJsonResponse:
                    self.last_json_response = json_response
                if self.verbose:
                    self.log("\nResponse:", method, url, http_status_code, headers, http_response)
                self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
        except socket.gaierror as e:
            # DNS resolution failure.
            details = ' '.join([self.id, method, url])
            raise ExchangeNotAvailable(details) from e
        except (concurrent.futures.TimeoutError, asyncio.TimeoutError) as e:
            details = ' '.join([self.id, method, url])
            raise RequestTimeout(details) from e
        except aiohttp.ClientConnectionError as e:
            details = ' '.join([self.id, method, url])
            raise ExchangeNotAvailable(details) from e
        except aiohttp.ClientError as e:  # base exception class
            details = ' '.join([self.id, method, url])
            raise ExchangeError(details) from e
        self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
        self.handle_http_status_code(http_status_code, http_status_text, url, method, http_response)
        if json_response is not None:
            return json_response
        if self.is_text_response(headers):
            return http_response
        # NOTE(review): `response` here is the object from the (already exited)
        # async-with block, so its connection is closed — confirm whether
        # reading .content at this point is intended for binary responses.
        return response.content
async def load_markets_helper(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
    async def load_markets(self, reload=False, params={}):
        """Load (and cache) the exchange markets, optionally forcing a reload.

        Concurrent callers share a single in-flight load via the
        self.markets_loading task instead of each issuing their own request.
        """
        if (reload and not self.reloading_markets) or not self.markets_loading:
            self.reloading_markets = True
            coroutine = self.load_markets_helper(reload, params)
            # coroutines can only be awaited once so we wrap it in a task
            self.markets_loading = asyncio.ensure_future(coroutine)
        try:
            result = await self.markets_loading
        except Exception as e:
            # Reset the shared state so a later call can retry the load.
            self.reloading_markets = False
            self.markets_loading = None
            raise e
        self.reloading_markets = False
        return result
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcvc(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcvc(trades, timeframe, since, limit)
async def fetchOHLCVC(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
ohlcvs = await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
return [ohlcv[0:-1] for ohlcv in ohlcvs]
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_balance(self, params={}):
raise NotSupported('fetch_balance() not supported yet')
async def create_order(self, symbol, type, side, amount, price=None, params={}):
raise NotSupported('create_order() not supported yet')
async def cancel_order(self, id, symbol=None, params={}):
raise NotSupported('cancel_order() not supported yet')
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
if self.has['fetchTickers']:
tickers = await self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise BadSymbol(self.id + ' fetchTickers could not find a ticker for ' + symbol)
else:
return ticker
else:
raise NotSupported(self.id + ' fetchTicker not supported yet')
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_transactions() is not supported yet')
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_deposits() is not supported yet')
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_withdrawals() is not supported yet')
async def fetch_deposit_address(self, code, params={}):
if self.has['fetchDepositAddresses']:
deposit_addresses = await self.fetch_deposit_addresses([code], params)
deposit_address = self.safe_value(deposit_addresses, code)
if deposit_address is None:
raise NotSupported(self.id + ' fetch_deposit_address could not find a deposit address for ' + code + ', make sure you have created a corresponding deposit address in your wallet on the exchange website')
else:
return deposit_address
else:
raise NotSupported(self.id + ' fetchDepositAddress not supported yet')
async def sleep(self, milliseconds):
return await asyncio.sleep(milliseconds / 1000)
| 43.807163 | 355 | 0.617029 |
__version__ = '1.61.55'
import asyncio
import concurrent.futures
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
from ccxt.async_support.base.throttler import Throttler
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
from ccxt.base.errors import BadSymbol
from ccxt.base.exchange import Exchange as BaseExchange
__all__ = [
'BaseExchange',
'Exchange',
]
class Exchange(BaseExchange):
synchronous = False
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.throttle = None
self.init_rest_rate_limiter()
self.markets_loading = None
self.reloading_markets = False
def init_rest_rate_limiter(self):
self.throttle = Throttler(self.tokenBucket, self.asyncio_loop)
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.asyncio_loop is None:
if sys.version_info >= (3, 7):
self.asyncio_loop = asyncio.get_running_loop()
else:
self.asyncio_loop = asyncio.get_event_loop()
self.throttle.loop = self.asyncio_loop
if self.own_session and self.session is None:
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop, enable_cleanup_closed=True)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
if self.enableRateLimit:
cost = self.calculate_rate_limiter_cost(api, method, path, params, config, context)
await self.throttle(cost)
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.log("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
self.open()
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text(errors='replace')
raw_headers = response.headers
headers = {}
for header in raw_headers:
if header in headers:
headers[header] = headers[header] + ', ' + raw_headers[header]
else:
headers[header] = raw_headers[header]
http_status_code = response.status
http_status_text = response.reason
http_response = self.on_rest_response(http_status_code, http_status_text, url, method, headers, http_response, request_headers, request_body)
json_response = self.parse_json(http_response)
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
self.log("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
details = ' '.join([self.id, method, url])
raise ExchangeNotAvailable(details) from e
except (concurrent.futures.TimeoutError, asyncio.TimeoutError) as e:
details = ' '.join([self.id, method, url])
raise RequestTimeout(details) from e
except aiohttp.ClientConnectionError as e:
details = ' '.join([self.id, method, url])
raise ExchangeNotAvailable(details) from e
except aiohttp.ClientError as e:
details = ' '.join([self.id, method, url])
raise ExchangeError(details) from e
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_http_status_code(http_status_code, http_status_text, url, method, http_response)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
async def load_markets_helper(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def load_markets(self, reload=False, params={}):
if (reload and not self.reloading_markets) or not self.markets_loading:
self.reloading_markets = True
coroutine = self.load_markets_helper(reload, params)
self.markets_loading = asyncio.ensure_future(coroutine)
try:
result = await self.markets_loading
except Exception as e:
self.reloading_markets = False
self.markets_loading = None
raise e
self.reloading_markets = False
return result
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcvc(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcvc(trades, timeframe, since, limit)
async def fetchOHLCVC(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
ohlcvs = await self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
return [ohlcv[0:-1] for ohlcv in ohlcvs]
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_balance(self, params={}):
raise NotSupported('fetch_balance() not supported yet')
async def create_order(self, symbol, type, side, amount, price=None, params={}):
raise NotSupported('create_order() not supported yet')
async def cancel_order(self, id, symbol=None, params={}):
raise NotSupported('cancel_order() not supported yet')
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
if self.has['fetchTickers']:
tickers = await self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise BadSymbol(self.id + ' fetchTickers could not find a ticker for ' + symbol)
else:
return ticker
else:
raise NotSupported(self.id + ' fetchTicker not supported yet')
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_transactions() is not supported yet')
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_deposits() is not supported yet')
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_withdrawals() is not supported yet')
async def fetch_deposit_address(self, code, params={}):
if self.has['fetchDepositAddresses']:
deposit_addresses = await self.fetch_deposit_addresses([code], params)
deposit_address = self.safe_value(deposit_addresses, code)
if deposit_address is None:
raise NotSupported(self.id + ' fetch_deposit_address could not find a deposit address for ' + code + ', make sure you have created a corresponding deposit address in your wallet on the exchange website')
else:
return deposit_address
else:
raise NotSupported(self.id + ' fetchDepositAddress not supported yet')
async def sleep(self, milliseconds):
return await asyncio.sleep(milliseconds / 1000)
| true | true |
f726c5d759f5490c1ae882cd36b4a8678f29a3ed | 4,451 | py | Python | dark/process.py | UdoGi/dark-matter | 3d49e89fa5e81f83144119f6216c5774176d203b | [
"MIT"
] | 10 | 2016-03-09T09:43:14.000Z | 2021-04-03T21:46:12.000Z | dark/process.py | terrycojones/dark-matter | 67d16f870db6b4239e17e542bc6e3f072dc29c75 | [
"MIT"
] | 332 | 2015-01-07T12:37:30.000Z | 2022-01-20T15:48:11.000Z | dark/process.py | terrycojones/dark-matter | 67d16f870db6b4239e17e542bc6e3f072dc29c75 | [
"MIT"
] | 4 | 2016-03-08T14:56:39.000Z | 2021-01-27T08:11:27.000Z | from __future__ import division, print_function
import six
from time import time, ctime
from subprocess import PIPE, CalledProcessError
if six.PY3:
from subprocess import run
else:
from subprocess import check_call
class Executor(object):
    """
    Log and execute shell commands.

    @param dryRun: If C{True}, do not execute commands, just log them.
        This sets the default and can be overidden for a specific command
        by passing C{dryRun} to the C{execute} method.
    """
    def __init__(self, dryRun=False):
        self._dryRun = dryRun
        self.log = [
            '# Executor created at %s. Dry run = %s.' % (ctime(time()), dryRun)
        ]

    @property
    def dryRun(self):
        """
        Is this a dry run?

        @return: A Boolean indicating whether this is a dry run.
        """
        # BUG FIX: this used to be a plain method returning self._dryRun while
        # __init__ stored the flag in self.dryRun, which (a) shadowed the
        # method with the instance attribute and (b) would have raised
        # AttributeError if ever called. It is now a real property backed by
        # self._dryRun, so both reads and writes of executor.dryRun keep
        # working as before.
        return self._dryRun

    @dryRun.setter
    def dryRun(self, value):
        """Set the default dry-run behavior for subsequent commands."""
        self._dryRun = value

    def execute(self, command, dryRun=None, useStderr=True, **kwargs):
        """
        Execute (or simulate) a command. Add to our log.

        @param command: Either a C{str} command (which will be passed to the
            shell) or a C{list} of command arguments (including the executable
            name), in which case the shell is not used.
        @param dryRun: If C{True}, do not execute commands, just log them.
            If C{False}, execute the commands. If not given or C{None}, use
            the default setting (in C{self.dryRun}).
        @param useStderr: If C{True} print a summary of the command standard
            output and standard error to sys.stderr if the command results in
            an exception. If a function is passed, the exception is passed to
            the function and the summary is printed to sys.stderr if the
            function returns C{True}.
        @param kwargs: Keyword arguments that will be passed to subprocess.run
            (or subprocess.check_call for Python version 2). Note that keyword
            arguments are not currently logged (the logging is slightly
            problematic since a keyword argument might be an environment
            dictionary).
        @raise CalledProcessError: If the command results in an error.
        @return: A C{CompletedProcess} instance. This has attributes such as
            C{returncode}, C{stdout}, and C{stderr}. See pydoc subprocess.
            If C{dryRun} is C{True}, C{None} is returned.
        """
        if isinstance(command, six.string_types):
            # Can't have newlines in a command given to the shell.
            strCommand = command = command.replace('\n', ' ').strip()
            shell = True
        else:
            strCommand = ' '.join(command)
            shell = False
        dryRun = self.dryRun if dryRun is None else dryRun
        if dryRun:
            self.log.append('$ ' + strCommand)
            return
        start = time()
        self.log.extend([
            '# Start command (shell=%s) at %s' % (shell, ctime(start)),
            '$ ' + strCommand,
        ])
        if six.PY3:
            try:
                result = run(command, check=True, stdout=PIPE, stderr=PIPE,
                             shell=shell, universal_newlines=True, **kwargs)
            except CalledProcessError as e:
                # Let a callable useStderr decide whether to print a summary.
                if callable(useStderr):
                    useStderr = useStderr(e)
                if useStderr:
                    import sys
                    print('CalledProcessError:', e, file=sys.stderr)
                    print('STDOUT:\n%s' % e.stdout, file=sys.stderr)
                    print('STDERR:\n%s' % e.stderr, file=sys.stderr)
                raise
        else:
            try:
                result = check_call(command, stdout=PIPE, stderr=PIPE,
                                    shell=shell, universal_newlines=True,
                                    **kwargs)
            except CalledProcessError as e:
                if callable(useStderr):
                    useStderr = useStderr(e)
                if useStderr:
                    import sys
                    print('CalledProcessError:', e, file=sys.stderr)
                    print('Return code: %s' % e.returncode, file=sys.stderr)
                    print('Output:\n%s' % e.output, file=sys.stderr)
                raise
        stop = time()
        elapsed = (stop - start)
        self.log.extend([
            '# Stop command at %s' % ctime(stop),
            '# Elapsed = %f seconds' % elapsed,
        ])
        return result
| 38.37069 | 79 | 0.560324 | from __future__ import division, print_function
import six
from time import time, ctime
from subprocess import PIPE, CalledProcessError
if six.PY3:
from subprocess import run
else:
from subprocess import check_call
class Executor(object):
def __init__(self, dryRun=False):
self.dryRun = dryRun
self.log = [
'# Executor created at %s. Dry run = %s.' % (ctime(time()), dryRun)
]
def dryRun(self):
return self._dryRun
def execute(self, command, dryRun=None, useStderr=True, **kwargs):
if isinstance(command, six.string_types):
strCommand = command = command.replace('\n', ' ').strip()
shell = True
else:
strCommand = ' '.join(command)
shell = False
dryRun = self.dryRun if dryRun is None else dryRun
if dryRun:
self.log.append('$ ' + strCommand)
return
start = time()
self.log.extend([
'
'$ ' + strCommand,
])
if six.PY3:
try:
result = run(command, check=True, stdout=PIPE, stderr=PIPE,
shell=shell, universal_newlines=True, **kwargs)
except CalledProcessError as e:
if callable(useStderr):
useStderr = useStderr(e)
if useStderr:
import sys
print('CalledProcessError:', e, file=sys.stderr)
print('STDOUT:\n%s' % e.stdout, file=sys.stderr)
print('STDERR:\n%s' % e.stderr, file=sys.stderr)
raise
else:
try:
result = check_call(command, stdout=PIPE, stderr=PIPE,
shell=shell, universal_newlines=True,
**kwargs)
except CalledProcessError as e:
if callable(useStderr):
useStderr = useStderr(e)
if useStderr:
import sys
print('CalledProcessError:', e, file=sys.stderr)
print('Return code: %s' % e.returncode, file=sys.stderr)
print('Output:\n%s' % e.output, file=sys.stderr)
raise
stop = time()
elapsed = (stop - start)
self.log.extend([
'
'
])
return result
| true | true |
f726c61ee010e4614285c7fc75b5c47cb8f51c57 | 318 | py | Python | Preprocessing/scripts/dec_user_id.py | udhavsethi/contentNCF | d11273956bf9c793eb616cde9c3da01c70e5403b | [
"Apache-2.0"
] | 2 | 2021-09-16T02:14:57.000Z | 2022-02-02T01:16:26.000Z | Preprocessing/scripts/dec_user_id.py | udhavsethi/contentNCF | d11273956bf9c793eb616cde9c3da01c70e5403b | [
"Apache-2.0"
] | null | null | null | Preprocessing/scripts/dec_user_id.py | udhavsethi/contentNCF | d11273956bf9c793eb616cde9c3da01c70e5403b | [
"Apache-2.0"
] | null | null | null | infile = open('500_users_to_images.train', 'r')
outfile = open('pinterest.data', 'w')
for line in infile.readlines():
user_id, img_id, img_url = line.strip().split('\t')
dec_user_id = str(int(user_id) - 1)
outfile.write("{}\t{}\t{}\n".format(dec_user_id, img_id, img_url))
infile.close()
outfile.close()
| 28.909091 | 70 | 0.666667 | infile = open('500_users_to_images.train', 'r')
outfile = open('pinterest.data', 'w')
for line in infile.readlines():
user_id, img_id, img_url = line.strip().split('\t')
dec_user_id = str(int(user_id) - 1)
outfile.write("{}\t{}\t{}\n".format(dec_user_id, img_id, img_url))
infile.close()
outfile.close()
| true | true |
f726c69eef6c7661033f52ac8cc3885f33c80910 | 686 | py | Python | app/core/migrations/0003_ingredient.py | amaurycoudr/recipe-app-api | ab4da3d5553230d9b15ddc6f97091e3f01cc348e | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | amaurycoudr/recipe-app-api | ab4da3d5553230d9b15ddc6f97091e3f01cc348e | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | amaurycoudr/recipe-app-api | ab4da3d5553230d9b15ddc6f97091e3f01cc348e | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-08-24 07:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Ingredient model, owned by a user (auto-generated migration)."""

    dependencies = [
        ('core', '0002_tag'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                # Owning user; ingredients are removed together with their user (CASCADE).
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.583333 | 118 | 0.618076 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``Ingredient`` model: a named item owned by a user."""
    # Must run after the migration that added the Tag model in this app.
    dependencies = [
        ('core', '0002_tag'),
    ]
    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                # Deleting a user cascades to their ingredients.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
f726c7af66c2d6b8cae93c16306362700e6e476b | 2,702 | py | Python | examples/face_recognition_svm.py | viettriit2110/face_recognition | 0e1821af6538c573ed4a87acc361c44900f849eb | [
"MIT"
] | 2 | 2019-11-12T06:22:45.000Z | 2019-11-12T14:30:00.000Z | examples/face_recognition_svm.py | viettriit2110/face_recognition | 0e1821af6538c573ed4a87acc361c44900f849eb | [
"MIT"
] | null | null | null | examples/face_recognition_svm.py | viettriit2110/face_recognition | 0e1821af6538c573ed4a87acc361c44900f849eb | [
"MIT"
] | null | null | null | # Train multiple images per person
# Find and recognize faces in an image using a SVC with scikit-learn
"""
Structure:
        <test_image>.jpg
        <train_dir>/
            <person_1>/
                <person_1_face-1>.jpg
                <person_1_face-2>.jpg
                .
                .
                <person_1_face-n>.jpg
            <person_2>/
                <person_2_face-1>.jpg
                <person_2_face-2>.jpg
                .
                .
                <person_2_face-n>.jpg
            .
            .
            <person_n>/
                <person_n_face-1>.jpg
                <person_n_face-2>.jpg
                .
                .
                <person_n_face-n>.jpg
"""
import face_recognition
from sklearn import svm
import os

# Training data for the SVC classifier: one face encoding per training image,
# labelled with the person's (directory) name.
encodings = []
names = []

# Training directory: one sub-directory per person.
train_dir = os.listdir('/train_dir/')

# Loop through each person in the training directory
for person in train_dir:
    pix = os.listdir("/train_dir/" + person)

    # Loop through each training image for the current person
    for person_img in pix:
        # Load the image and locate the face(s) in it.
        face = face_recognition.load_image_file("/train_dir/" + person + "/" + person_img)
        face_bounding_boxes = face_recognition.face_locations(face)

        # A usable training image must contain exactly one face; abort otherwise.
        if len(face_bounding_boxes) != 1:
            print(person + "/" + person_img + " contains none or more than one faces and can't be used for training.")
            exit()

        # Reuse the bounding box found above instead of letting
        # face_encodings() run face detection a second time.
        face_enc = face_recognition.face_encodings(face, known_face_locations=face_bounding_boxes)[0]
        encodings.append(face_enc)
        names.append(person)

# Create and train the SVC classifier
clf = svm.SVC(gamma='scale')
clf.fit(encodings, names)

# Load the test image with unknown faces into a numpy array
test_image = face_recognition.load_image_file('test_image.jpg')

# Find all the faces in the test image using the default HOG-based model
face_locations = face_recognition.face_locations(test_image)
no = len(face_locations)
print("Number of faces detected: ", no)

# Predict all the faces in the test image using the trained classifier.
# The encodings are computed once for the whole image up front; calling
# face_encodings(test_image) inside the loop would redo detection and
# encoding of every face on each iteration (O(n^2) work).
print("Found:")
test_image_encs = face_recognition.face_encodings(test_image, known_face_locations=face_locations)
for test_image_enc in test_image_encs:
    name = clf.predict([test_image_enc])
    print(*name)
| 33.358025 | 119 | 0.611769 |
# Train an SVC on known faces, then classify the faces found in a test image.
import face_recognition
from sklearn import svm
import os
# Training data: one encoding per training face, labelled with the person's name.
encodings = []
names = []
# Each sub-directory of /train_dir/ is one person.
train_dir = os.listdir('/train_dir/')
for person in train_dir:
    pix = os.listdir("/train_dir/" + person)
    for person_img in pix:
        # Load the training image and locate the face(s) in it.
        face = face_recognition.load_image_file("/train_dir/" + person + "/" + person_img)
        face_bounding_boxes = face_recognition.face_locations(face)
        # A usable training image must contain exactly one face; abort otherwise.
        if len(face_bounding_boxes) != 1:
            print(person + "/" + person_img + " contains none or more than one faces and can't be used for training.")
            exit()
        else:
            face_enc = face_recognition.face_encodings(face)[0]
            # Add face encoding for current image with corresponding label (name) to the training data
            encodings.append(face_enc)
            names.append(person)
# Create and train the SVC classifier
clf = svm.SVC(gamma='scale')
clf.fit(encodings,names)
# Load the test image with unknown faces into a numpy array
test_image = face_recognition.load_image_file('test_image.jpg')
# Find all the faces in the test image using the default HOG-based model
face_locations = face_recognition.face_locations(test_image)
no = len(face_locations)
print("Number of faces detected: ", no)
# Predict all the faces in the test image using the trained classifier
print("Found:")
for i in range(no):
    # NOTE(review): face_encodings(test_image) recomputes the encodings of
    # every face on each loop iteration (O(n^2)); hoisting the call out of
    # the loop would avoid the repeated work.
    test_image_enc = face_recognition.face_encodings(test_image)[i]
    name = clf.predict([test_image_enc])
    print(*name)
| true | true |
f726c7e3f9f0e96210a1b6a0a5aa57076aacdff1 | 17,093 | py | Python | aldryn_newsblog/south_migrations/0010_auto__add_unique_articletranslation_language_code_slug__del_field_arti.py | what-digital/aldryn-newsblog-blog-teaser-size | c52cb256fe3b608838f2184de9575b6cbbfd5f8e | [
"BSD-3-Clause"
] | null | null | null | aldryn_newsblog/south_migrations/0010_auto__add_unique_articletranslation_language_code_slug__del_field_arti.py | what-digital/aldryn-newsblog-blog-teaser-size | c52cb256fe3b608838f2184de9575b6cbbfd5f8e | [
"BSD-3-Clause"
] | null | null | null | aldryn_newsblog/south_migrations/0010_auto__add_unique_articletranslation_language_code_slug__del_field_arti.py | what-digital/aldryn-newsblog-blog-teaser-size | c52cb256fe3b608838f2184de9575b6cbbfd5f8e | [
"BSD-3-Clause"
] | 2 | 2019-10-22T04:30:28.000Z | 2019-10-22T05:09:16.000Z | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from aldryn_newsblog.utils.migration import rename_tables_old_to_new, rename_tables_new_to_old
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: rename tables to the new naming scheme, make the slug
        unique per language on ArticleTranslation, and drop the
        ``Article.slug`` column (the slug lives on the translation)."""
        rename_tables_old_to_new(db)
        # Adding unique constraint on 'ArticleTranslation', fields ['language_code', 'slug']
        db.create_unique(u'aldryn_newsblog_article_translation', ['language_code', 'slug'])
        # Deleting field 'Article.slug'
        db.delete_column(u'aldryn_newsblog_article', 'slug')
    def backwards(self, orm):
        """Reverse: undo the table renames, drop the per-language slug
        uniqueness constraint, and restore the ``Article.slug`` column."""
        rename_tables_new_to_old(db)
        # Removing unique constraint on 'ArticleTranslation', fields ['language_code', 'slug']
        db.delete_unique(u'aldryn_newsblog_article_translation', ['language_code', 'slug'])
        # Adding field 'Article.slug'
        db.add_column(u'aldryn_newsblog_article', 'slug',
                      self.gf('django.db.models.fields.SlugField')(default='', max_length=255, blank=True),
                      keep_default=False)
models = {
u'aldryn_categories.category': {
'Meta': {'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'rgt': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'aldryn_newsblog.article': {
'Meta': {'ordering': "[u'-publishing_date']", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Person']", 'null': 'True', 'blank': 'True'}),
'categories': ('aldryn_categories.fields.CategoryManyToManyField', [], {'to': u"orm['aldryn_categories.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'aldryn_newsblog_articles'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'publishing_date': ('django.db.models.fields.DateTimeField', [], {})
},
u'aldryn_newsblog.articletranslation': {
'Meta': {'unique_together': "[(u'language_code', u'slug'), (u'language_code', u'master')]", 'object_name': 'ArticleTranslation', 'db_table': "u'aldryn_newsblog_article_translation'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'lead_in': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "u''"}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_newsblog.Article']"}),
'meta_description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_title': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '234'})
},
u'aldryn_newsblog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'latest_entries': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'aldryn_newsblog.newsblogconfig': {
'Meta': {'unique_together': "(('type', 'namespace'),)", 'object_name': 'NewsBlogConfig'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'aldryn_people.group': {
'Meta': {'object_name': 'Group'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'aldryn_people.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'vcard_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': [u'filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['aldryn_newsblog'] | 83.789216 | 195 | 0.572398 |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from aldryn_newsblog.utils.migration import rename_tables_old_to_new, rename_tables_new_to_old
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: rename tables to the new naming scheme, make the slug
        unique per language on ArticleTranslation, and drop the
        ``Article.slug`` column (the slug lives on the translation)."""
        rename_tables_old_to_new(db)
        # Enforce one slug per language across article translations.
        db.create_unique(u'aldryn_newsblog_article_translation', ['language_code', 'slug'])
        # Drop the untranslated slug column from Article.
        db.delete_column(u'aldryn_newsblog_article', 'slug')
    def backwards(self, orm):
        """Reverse: undo the table renames, drop the per-language slug
        uniqueness constraint, and restore the ``Article.slug`` column."""
        rename_tables_new_to_old(db)
        # Remove the (language_code, slug) uniqueness constraint.
        db.delete_unique(u'aldryn_newsblog_article_translation', ['language_code', 'slug'])
        # Re-create the slug column on Article with an empty default.
        db.add_column(u'aldryn_newsblog_article', 'slug',
                      self.gf('django.db.models.fields.SlugField')(default='', max_length=255, blank=True),
                      keep_default=False)
models = {
u'aldryn_categories.category': {
'Meta': {'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'rgt': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'aldryn_newsblog.article': {
'Meta': {'ordering': "[u'-publishing_date']", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Person']", 'null': 'True', 'blank': 'True'}),
'categories': ('aldryn_categories.fields.CategoryManyToManyField', [], {'to': u"orm['aldryn_categories.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'aldryn_newsblog_articles'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'publishing_date': ('django.db.models.fields.DateTimeField', [], {})
},
u'aldryn_newsblog.articletranslation': {
'Meta': {'unique_together': "[(u'language_code', u'slug'), (u'language_code', u'master')]", 'object_name': 'ArticleTranslation', 'db_table': "u'aldryn_newsblog_article_translation'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'lead_in': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "u''"}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_newsblog.Article']"}),
'meta_description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_title': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '234'})
},
u'aldryn_newsblog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'latest_entries': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'aldryn_newsblog.newsblogconfig': {
'Meta': {'unique_together': "(('type', 'namespace'),)", 'object_name': 'NewsBlogConfig'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'aldryn_people.group': {
'Meta': {'object_name': 'Group'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'aldryn_people.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'vcard_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': [u'filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['aldryn_newsblog'] | true | true |
f726c922dcd0dfb1fe6fe59bdb40b901325c9e0a | 33,645 | py | Python | ambassador/ambassador/config/resourcefetcher.py | Andrei-Predoiu/ambassador | efbd0ac8d65e36eab68997051167bc3eea165f35 | [
"Apache-2.0"
] | null | null | null | ambassador/ambassador/config/resourcefetcher.py | Andrei-Predoiu/ambassador | efbd0ac8d65e36eab68997051167bc3eea165f35 | [
"Apache-2.0"
] | null | null | null | ambassador/ambassador/config/resourcefetcher.py | Andrei-Predoiu/ambassador | efbd0ac8d65e36eab68997051167bc3eea165f35 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
# from typing import cast as typecast
import json
import logging
import os
import yaml
from .config import Config
from .acresource import ACResource
from ..utils import parse_yaml, dump_yaml
AnyDict = Dict[str, Any]
HandlerResult = Optional[Tuple[str, List[AnyDict]]]
# Some thoughts:
# - loading a bunch of Ambassador resources is different from loading a bunch of K8s
# services, because we should assume that if we're being a fed a bunch of Ambassador
# resources, we'll get a full set. The whole 'secret loader' thing needs to have the
# concept of a TLSSecret resource that can be force-fed to us, or that can be fetched
# through the loader if needed.
# - If you're running a debug-loop Ambassador, you should just have a flat (or
# recursive, I don't care) directory full of Ambassador YAML, including TLSSecrets
# and Endpoints and whatnot, as needed. All of it will get read by
# load_from_filesystem and end up in the elements array.
# - If you're running expecting to be fed by kubewatch, at present kubewatch will
# send over K8s Service records, and anything annotated in there will end up in
# elements. This may include TLSSecrets or Endpoints. Any TLSSecret mentioned that
# isn't already in elements will need to be fetched.
# - Ambassador resources do not have namespaces. They have the ambassador_id. That's
# it. The ambassador_id is completely orthogonal to the namespace. No element with
# the wrong ambassador_id will end up in elements. It would be nice if they were
# never sent by kubewatch, but, well, y'know.
# - TLSSecret resources are not TLSContexts. TLSSecrets only have a name, a private
# half, and a public half. They do _not_ have other TLSContext information.
# - Endpoint resources probably have just a name, a service name, and an endpoint
# address.
class ResourceFetcher:
    def __init__(self, logger: logging.Logger, aconf: 'Config') -> None:
        """Set up an empty fetcher that feeds resources into aconf.

        :param logger: logger for debug/error output
        :param aconf: Config object that receives parsed resources and errors
        """
        self.aconf = aconf
        self.logger = logger
        # Accumulated Ambassador resources, in input order.
        self.elements: List[ACResource] = []
        # Current input-source name and per-source object counter; together
        # they form self.location for error messages.
        self.filename: Optional[str] = None
        self.ocount: int = 1
        # Stack of (filename, ocount) pairs so nested parses can push/pop
        # their own location (see push_location / pop_location).
        self.saved: List[Tuple[Optional[str], int]] = []
        # Raw K8s Endpoints and Service info stashed for resolution in
        # finalize(), keyed by "name.namespace".
        self.k8s_endpoints: Dict[str, AnyDict] = {}
        self.k8s_services: Dict[str, AnyDict] = {}
        # Ambassador Service resources synthesized from K8s/Consul data.
        self.services: Dict[str, AnyDict] = {}
@property
def location(self):
return "%s.%d" % (self.filename or "anonymous YAML", self.ocount)
def push_location(self, filename: Optional[str], ocount: int) -> None:
self.saved.append((self.filename, self.ocount))
self.filename = filename
self.ocount = ocount
def pop_location(self) -> None:
self.filename, self.ocount = self.saved.pop()
def load_from_filesystem(self, config_dir_path, recurse: bool=False, k8s: bool=False):
inputs: List[Tuple[str, str]] = []
if os.path.isdir(config_dir_path):
dirs = [ config_dir_path ]
while dirs:
dirpath = dirs.pop(0)
for filename in os.listdir(dirpath):
filepath = os.path.join(dirpath, filename)
if recurse and os.path.isdir(filepath):
# self.logger.debug("%s: RECURSE" % filepath)
dirs.append(filepath)
continue
if not os.path.isfile(filepath):
# self.logger.debug("%s: SKIP non-file" % filepath)
continue
if not filename.lower().endswith('.yaml'):
# self.logger.debug("%s: SKIP non-YAML" % filepath)
continue
# self.logger.debug("%s: SAVE configuration file" % filepath)
inputs.append((filepath, filename))
else:
# this allows a file to be passed into the ambassador cli
# rather than just a directory
inputs.append((config_dir_path, os.path.basename(config_dir_path)))
for filepath, filename in inputs:
self.logger.info("reading %s (%s)" % (filename, filepath))
try:
serialization = open(filepath, "r").read()
self.parse_yaml(serialization, k8s=k8s, filename=filename)
except IOError as e:
self.aconf.post_error("could not read YAML from %s: %s" % (filepath, e))
self.finalize()
def parse_yaml(self, serialization: str, k8s=False, rkey: Optional[str]=None,
filename: Optional[str]=None) -> None:
# self.logger.debug("%s: parsing %d byte%s of YAML:\n%s" %
# (self.location, len(serialization), "" if (len(serialization) == 1) else "s",
# serialization))
try:
objects = parse_yaml(serialization)
self.parse_object(objects=objects, k8s=k8s, rkey=rkey, filename=filename)
except yaml.error.YAMLError as e:
self.aconf.post_error("%s: could not parse YAML: %s" % (self.location, e))
self.finalize()
def parse_json(self, serialization: str, k8s=False, rkey: Optional[str]=None,
filename: Optional[str]=None) -> None:
# self.logger.debug("%s: parsing %d byte%s of YAML:\n%s" %
# (self.location, len(serialization), "" if (len(serialization) == 1) else "s",
# serialization))
try:
objects = json.loads(serialization)
self.parse_object(objects=objects, k8s=k8s, rkey=rkey, filename=filename)
except json.decoder.JSONDecodeError as e:
self.aconf.post_error("%s: could not parse YAML: %s" % (self.location, e))
self.finalize()
    def parse_watt(self, serialization: str) -> None:
        """Parse a watt snapshot: JSON holding Kubernetes and Consul state.

        Plain K8s objects (service/endpoints/secret) are dispatched first,
        then Ambassador CRDs, then Consul endpoint data. finalize() always
        runs afterward.
        """
        basedir = os.environ.get('AMBASSADOR_CONFIG_BASE_DIR', '/ambassador')

        # These sentinel files are presumably dropped by the startup scripts
        # when CRD types could not be registered -- TODO confirm against the
        # entrypoint that writes them.
        if os.path.isfile(os.path.join(basedir, '.ambassador_ignore_crds')):
            self.aconf.post_error("Ambassador could not find core CRD definitions. Please visit https://www.getambassador.io/reference/core/crds/ for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...")

        if os.path.isfile(os.path.join(basedir, '.ambassador_ignore_crds_2')):
            self.aconf.post_error("Ambassador could not find Resolver type CRD definitions. Please visit https://www.getambassador.io/reference/core/crds/ for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...")

        try:
            watt_dict = json.loads(serialization)

            watt_k8s = watt_dict.get('Kubernetes', {})

            # Handle normal Kube objects...
            for key in [ 'service', 'endpoints', 'secret' ]:
                for obj in watt_k8s.get(key) or []:
                    self.handle_k8s(obj)

            # ...then handle Ambassador CRDs.
            for key in [ 'AuthService', 'ConsulResolver',
                         'KubernetesEndpointResolver', 'KubernetesServiceResolver',
                         'Mapping', 'Module', 'RateLimitService',
                         'TCPMapping', 'TLSContext', 'TracingService']:
                for obj in watt_k8s.get(key) or []:
                    self.handle_k8s_crd(obj)

            watt_consul = watt_dict.get('Consul', {})
            consul_endpoints = watt_consul.get('Endpoints', {})

            for consul_rkey, consul_object in consul_endpoints.items():
                result = self.handle_consul_service(consul_rkey, consul_object)

                if result:
                    rkey, parsed_objects = result

                    self.parse_object(parsed_objects, k8s=False,
                                      filename=self.filename, rkey=rkey)
        except json.decoder.JSONDecodeError as e:
            self.aconf.post_error("%s: could not parse WATT: %s" % (self.location, e))

        self.finalize()
def handle_k8s(self, obj: dict) -> None:
# self.logger.debug("handle_k8s obj %s" % json.dumps(obj, indent=4, sort_keys=True))
kind = obj.get('kind')
if not kind:
# self.logger.debug("%s: ignoring K8s object, no kind" % self.location)
return
handler_name = f'handle_k8s_{kind.lower()}'
handler = getattr(self, handler_name, None)
if not handler:
# self.logger.debug("%s: ignoring K8s object, no kind" % self.location)
return
result = handler(obj)
if result:
rkey, parsed_objects = result
self.parse_object(parsed_objects, k8s=False,
filename=self.filename, rkey=rkey)
def handle_k8s_crd(self, obj: dict) -> None:
# CRDs are _not_ allowed to have embedded objects in annotations, because ew.
kind = obj.get('kind')
if not kind:
self.logger.debug("%s: ignoring K8s CRD, no kind" % self.location)
return
apiVersion = obj.get('apiVersion')
metadata = obj.get('metadata') or {}
name = metadata.get('name')
namespace = metadata.get('namespace') or 'default'
spec = obj.get('spec') or {}
if not name:
self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD, no name')
return
if not apiVersion:
self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD {name}: no apiVersion')
return
# if not spec:
# self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD {name}: no spec')
# return
# We use this resource identifier as a key into self.k8s_services, and of course for logging .
resource_identifier = f'{name}.{namespace}'
# OK. Shallow copy 'spec'...
amb_object = dict(spec)
# ...and then stuff in a couple of other things.
amb_object['apiVersion'] = apiVersion
amb_object['name'] = name
amb_object['kind'] = kind
# Done. Parse it.
self.parse_object([ amb_object ], k8s=False, filename=self.filename, rkey=resource_identifier)
def parse_object(self, objects, k8s=False, rkey: Optional[str]=None, filename: Optional[str]=None):
self.push_location(filename, 1)
# self.logger.debug("PARSE_OBJECT: incoming %d" % len(objects))
for obj in objects:
self.logger.debug("PARSE_OBJECT: checking %s" % obj)
if k8s:
self.handle_k8s(obj)
else:
# if not obj:
# self.logger.debug("%s: empty object from %s" % (self.location, serialization))
self.process_object(obj, rkey=rkey)
self.ocount += 1
self.pop_location()
def process_object(self, obj: dict, rkey: Optional[str]=None) -> None:
if not isinstance(obj, dict):
# Bug!!
if not obj:
self.aconf.post_error("%s is empty" % self.location)
else:
self.aconf.post_error("%s is not a dictionary? %s" %
(self.location, json.dumps(obj, indent=4, sort_keys=4)))
return
if not self.aconf.good_ambassador_id(obj):
# self.logger.debug("%s ignoring K8s Service with mismatched ambassador_id" % self.location)
return
if 'kind' not in obj:
# Bug!!
self.aconf.post_error("%s is missing 'kind'?? %s" %
(self.location, json.dumps(obj, indent=4, sort_keys=True)))
return
# self.logger.debug("%s PROCESS %s initial rkey %s" % (self.location, obj['kind'], rkey))
# Is this a pragma object?
if obj['kind'] == 'Pragma':
# Why did I think this was a good idea? [ :) ]
new_source = obj.get('source', None)
if new_source:
# We don't save the old self.filename here, so this change will last until
# the next input source (or the next Pragma).
self.filename = new_source
# Don't count Pragma objects, since the user generally doesn't write them.
self.ocount -= 1
return
if not rkey:
rkey = self.filename
rkey = "%s.%d" % (rkey, self.ocount)
# self.logger.debug("%s PROCESS %s updated rkey to %s" % (self.location, obj['kind'], rkey))
# Fine. Fine fine fine.
serialization = dump_yaml(obj, default_flow_style=False)
r = ACResource.from_dict(rkey, rkey, serialization, obj)
self.elements.append(r)
# self.logger.debug("%s PROCESS %s save %s: %s" % (self.location, obj['kind'], rkey, serialization))
    def sorted(self, key=lambda x: x.rkey): # returns a list sorted by key
        """Return self.elements sorted by key (default: the resource rkey).

        NB: this method shadows the name of the builtin sorted() only as a
        class attribute; the call below still resolves to the builtin.
        """
        return sorted(self.elements, key=key)
    def handle_k8s_endpoints(self, k8s_object: AnyDict) -> HandlerResult:
        """Stash a K8s Endpoints resource for resolution in finalize().

        Always returns None: nothing is parsed immediately. The extracted
        address/port info lands in self.k8s_endpoints keyed by
        "name.namespace".
        """
        # Don't include Endpoints unless endpoint routing is enabled.
        if not Config.enable_endpoints:
            return None

        metadata = k8s_object.get('metadata', None)
        resource_name = metadata.get('name') if metadata else None
        resource_namespace = metadata.get('namespace', 'default') if metadata else None
        resource_subsets = k8s_object.get('subsets', None)

        skip = False

        if not metadata:
            self.logger.debug("ignoring K8s Endpoints with no metadata")
            skip = True

        if not resource_name:
            self.logger.debug("ignoring K8s Endpoints with no name")
            skip = True

        if not resource_subsets:
            self.logger.debug(f"ignoring K8s Endpoints {resource_name}.{resource_namespace} with no subsets")
            skip = True

        if skip:
            return None

        # We use this resource identifier as a key into self.k8s_services, and of course for logging .
        resource_identifier = '{name}.{namespace}'.format(namespace=resource_namespace, name=resource_name)

        # K8s Endpoints resources are _stupid_ in that they give you a vector of
        # IP addresses and a vector of ports, and you have to assume that every
        # IP address listens on every port, and that the semantics of each port
        # are identical. The first is usually a good assumption. The second is not:
        # people routinely list 80 and 443 for the same service, for example,
        # despite the fact that one is HTTP and the other is HTTPS.
        #
        # By the time the ResourceFetcher is done, we want to be working with
        # Ambassador Service resources, which have an array of address:port entries
        # for endpoints. So we're going to extract the address and port numbers
        # as arrays of tuples and stash them for later.
        #
        # In Kubernetes-speak, the Endpoints resource has some metadata and a set
        # of "subsets" (though I've personally never seen more than one subset in
        # one of these things).

        for subset in resource_subsets:
            # K8s subset addresses have some node info in with the IP address.
            # May as well save that too.

            addresses = []

            for address in subset.get('addresses', []):
                addr = {}

                ip = address.get('ip', None)
                if ip is not None:
                    addr['ip'] = ip

                node = address.get('nodeName', None)
                if node is not None:
                    addr['node'] = node

                target_ref = address.get('targetRef', None)
                if target_ref is not None:
                    target_kind = target_ref.get('kind', None)
                    if target_kind is not None:
                        addr['target_kind'] = target_kind

                    target_name = target_ref.get('name', None)
                    if target_name is not None:
                        addr['target_name'] = target_name

                    target_namespace = target_ref.get('namespace', None)
                    if target_namespace is not None:
                        addr['target_namespace'] = target_namespace

                if len(addr) > 0:
                    addresses.append(addr)

            # If we got no addresses, there's no point in messing with ports.
            if len(addresses) == 0:
                continue

            ports = subset.get('ports', [])

            # A service can reference a port either by name or by port number.
            # Build a lookup keyed both ways; only TCP ports are routable here.
            port_dict = {}
            for port in ports:
                port_name = port.get('name', None)
                port_number = port.get('port', None)
                port_proto = port.get('protocol', 'TCP').upper()

                if port_proto != 'TCP':
                    continue

                if port_number is None:
                    # WTFO.
                    continue

                port_dict[str(port_number)] = port_number

                if port_name:
                    port_dict[port_name] = port_number

            if port_dict:
                # We're not going to actually return this: we'll just stash it for our
                # later resolution pass.
                self.k8s_endpoints[resource_identifier] = {
                    'name': resource_name,
                    'namespace': resource_namespace,
                    'addresses': addresses,
                    'ports': port_dict
                }
            else:
                self.logger.debug(f"ignoring K8s Endpoints {resource_identifier} with no routable ports")

        return None
    def handle_k8s_service(self, k8s_object: AnyDict) -> HandlerResult:
        """Extract Ambassador annotations from a K8s Service and stash its
        ports for endpoint resolution in finalize().

        Returns (resource_identifier, objects) where objects are any
        Ambassador resources found in the getambassador.io/config annotation
        (possibly an empty list), or None if the Service is skipped.
        """
        # The annoying bit about K8s Service resources is that not only do we have to look
        # inside them for Ambassador resources, but we also have to save their info for
        # later endpoint resolution too.
        #
        # Again, we're trusting that the input isn't overly bloated on that latter bit.

        metadata = k8s_object.get('metadata', None)
        resource_name = metadata.get('name') if metadata else None
        resource_namespace = metadata.get('namespace', 'default') if metadata else None

        annotations = metadata.get('annotations', None) if metadata else None
        if annotations:
            annotations = annotations.get('getambassador.io/config', None)

        skip = False

        if not metadata:
            self.logger.debug("ignoring K8s Service with no metadata")
            skip = True

        if not skip and not resource_name:
            self.logger.debug("ignoring K8s Service with no name")
            skip = True

        if not skip and (Config.single_namespace and (resource_namespace != Config.ambassador_namespace)):
            # This should never happen in actual usage, since we shouldn't be given things
            # in the wrong namespace. However, in development, this can happen a lot.
            self.logger.debug(f"ignoring K8s Service {resource_name}.{resource_namespace} in wrong namespace")
            skip = True

        if skip:
            return None

        # We use this resource identifier as a key into self.k8s_services, and of course for logging .
        resource_identifier = f'{resource_name}.{resource_namespace}'

        # Not skipping. First, if we have some actual ports, stash this in self.k8s_services
        # for later resolution.

        spec = k8s_object.get('spec', None)
        ports = spec.get('ports', None) if spec else None

        if spec and ports:
            self.k8s_services[resource_identifier] = {
                'name': resource_name,
                'namespace': resource_namespace,
                'ports': ports
            }
        else:
            self.logger.debug(f"not saving K8s Service {resource_name}.{resource_namespace} with no ports")

        objects: List[Any] = []

        if annotations:
            # Tag the location so errors point at the annotation, not the file.
            if (self.filename is not None) and (not self.filename.endswith(":annotation")):
                self.filename += ":annotation"

            try:
                objects = parse_yaml(annotations)
            except yaml.error.YAMLError as e:
                self.logger.debug("could not parse YAML: %s" % e)

        return resource_identifier, objects
    # Handler for K8s Secret resources.
    def handle_k8s_secret(self, k8s_object: AnyDict) -> HandlerResult:
        """Convert a K8s TLS/Opaque Secret into an Ambassador Secret resource.

        Returns (resource_identifier, [secret_info]) on success, or None if
        the Secret is skipped (wrong type, no data, wrong namespace, or no
        tls.crt/tls.key entries).
        """
        # XXX Another one where we shouldn't be saving everything.

        secret_type = k8s_object.get('type', None)
        metadata = k8s_object.get('metadata', None)
        resource_name = metadata.get('name') if metadata else None
        resource_namespace = metadata.get('namespace', 'default') if metadata else None
        data = k8s_object.get('data', None)

        skip = False

        if (secret_type != 'kubernetes.io/tls') and (secret_type != 'Opaque'):
            self.logger.debug("ignoring K8s Secret with unknown type %s" % secret_type)
            skip = True

        if not data:
            self.logger.debug("ignoring K8s Secret with no data")
            skip = True

        if not metadata:
            self.logger.debug("ignoring K8s Secret with no metadata")
            skip = True

        if not resource_name:
            self.logger.debug("ignoring K8s Secret with no name")
            skip = True

        if not skip and (Config.single_namespace and (resource_namespace != Config.ambassador_namespace)):
            # This should never happen in actual usage, since we shouldn't be given things
            # in the wrong namespace. However, in development, this can happen a lot.
            self.logger.debug("ignoring K8s Secret in wrong namespace")
            skip = True

        if skip:
            return None

        # This resource identifier is useful for log output since filenames can be duplicated (multiple subdirectories)
        resource_identifier = f'{resource_name}.{resource_namespace}'

        # NOTE(review): values are passed through as-is -- presumably still
        # base64-encoded as K8s stores them; decoding appears to happen
        # downstream. Confirm against the consumer of 'tls_crt'/'tls_key'.
        tls_crt = data.get('tls.crt', None)
        tls_key = data.get('tls.key', None)

        if not tls_crt and not tls_key:
            # Uh. WTFO?
            self.logger.debug(f'ignoring K8s Secret {resource_identifier} with no keys')
            return None

        # No need to muck about with resolution later, just immediately turn this
        # into an Ambassador Secret resource.
        secret_info = {
            'apiVersion': 'ambassador/v1',
            'ambassador_id': Config.ambassador_id,
            'kind': 'Secret',
            'name': resource_name,
            'namespace': resource_namespace
        }

        if tls_crt:
            secret_info['tls_crt'] = tls_crt

        if tls_key:
            secret_info['tls_key'] = tls_key

        return resource_identifier, [ secret_info ]
# Handler for Consul services
def handle_consul_service(self,
consul_rkey: str, consul_object: AnyDict) -> HandlerResult:
# resource_identifier = f'consul-{consul_rkey}'
endpoints = consul_object.get('Endpoints', [])
name = consul_object.get('Service', consul_rkey)
if len(endpoints) < 1:
# Bzzt.
self.logger.debug(f"ignoring Consul service {name} with no Endpoints")
return None
# We can turn this directly into an Ambassador Service resource, since Consul keeps
# services and endpoints together (as it should!!).
#
# Note that we currently trust the association ID to contain the datacenter name.
# That's a function of the watch_hook putting it there.
svc = {
'apiVersion': 'ambassador/v1',
'ambassador_id': Config.ambassador_id,
'kind': 'Service',
'name': name,
'datacenter': consul_object.get('Id') or 'dc1',
'endpoints': {}
}
for ep in endpoints:
ep_addr = ep.get('Address')
ep_port = ep.get('Port')
if not ep_addr or not ep_port:
self.logger.debug(f"ignoring Consul service {name} endpoint {ep['ID']} missing address info")
continue
# Consul services don't have the weird indirections that Kube services do, so just
# lump all the endpoints together under the same source port of '*'.
svc_eps = svc['endpoints'].setdefault('*', [])
svc_eps.append({
'ip': ep_addr,
'port': ep_port,
'target_kind': 'Consul'
})
# Once again: don't return this. Instead, save it in self.services.
self.services[f"consul-{name}-{svc['datacenter']}"] = svc
return None
    def finalize(self) -> None:
        """Resolve stashed K8s Service/Endpoints info into Ambassador Service
        resources, then append everything in self.services to self.elements.

        Safe to call repeatedly; it re-walks the stashed maps each time.
        """
        # The point here is to sort out self.k8s_services and self.k8s_endpoints and
        # turn them into proper Ambassador Service resources. This is a bit annoying,
        # because of the annoyances of Kubernetes, but we'll give it a go.
        #
        # Here are the rules:
        #
        # 1. By the time we get here, we have a _complete_ set of Ambassador resources that
        #    have passed muster by virtue of having the correct namespace, the correct
        #    ambassador_id, etc. (They may have duplicate names at this point, admittedly.)
        #    Any service not mentioned by name is out. Since the Ambassador resources in
        #    self.elements are in fact AResources, we can farm this out to code for each
        #    resource.
        #
        # 2. The check is, by design, permissive. If in doubt, write the check to leave
        #    the resource in.
        #
        # 3. For any service that stays in, we vet its listed ports against self.k8s_endpoints.
        #    Anything with no matching ports is _not_ dropped; it is assumed to use service
        #    routing rather than endpoint routing.

        # NOTE: od is only consumed by the commented-out debug dump below.
        od = {
            'elements': [ x.as_dict() for x in self.elements ],
            'k8s_endpoints': self.k8s_endpoints,
            'k8s_services': self.k8s_services,
            'services': self.services
        }

        # self.logger.debug("==== FINALIZE START\n%s" % json.dumps(od, sort_keys=True, indent=4))

        for key, k8s_svc in self.k8s_services.items():
            # See if we can find endpoints for this service.
            k8s_ep = self.k8s_endpoints.get(key, None)
            k8s_ep_ports = k8s_ep.get('ports', None) if k8s_ep else None

            k8s_name = k8s_svc['name']
            k8s_namespace = k8s_svc['namespace']

            # OK, Kube is weird. The way all this works goes like this:
            #
            # 1. When you create a Kube Service, Kube will allocate a clusterIP
            #    for it and update DNS to resolve the name of the service to
            #    that clusterIP.
            # 2. Kube will look over the pods matched by the Service's selectors
            #    and stick those pods' IP addresses into Endpoints for the Service.
            # 3. The Service will have ports listed. These service.port entries can
            #    contain:
            #      port -- a port number you can talk to at the clusterIP
            #      name -- a name for this port
            #      targetPort -- a port number you can talk to at the _endpoint_ IP
            #    We'll call the 'port' entry here the "service-port".
            # 4. If you talk to clusterIP:service-port, you will get magically
            #    proxied by the Kube CNI to a target port at one of the endpoint IPs.
            #
            # The $64K question is: how does Kube decide which target port to use?
            #
            # First, if there's only one endpoint port, that's the one that gets used.
            #
            # If there's more than one, if the Service's port entry has a targetPort
            # number, it uses that. Otherwise it tries to find an endpoint port with
            # the same name as the service port. Otherwise, I dunno, it punts and uses
            # the service-port.
            #
            # So that's how Ambassador is going to do it, for each Service port entry.
            #
            # If we have no endpoints at all, Ambassador will end up routing using
            # just the service name and port per the Mapping's service spec.

            target_ports = {}
            target_addrs = []
            svc_endpoints = {}

            if not k8s_ep or not k8s_ep_ports:
                # No endpoints at all, so we're done with this service.
                self.logger.debug(f'{key}: no endpoints at all')
            else:
                idx = -1

                for port in k8s_svc['ports']:
                    idx += 1

                    k8s_target: Optional[int] = None

                    src_port = port.get('port', None)

                    if not src_port:
                        # WTFO. This is impossible.
                        self.logger.error(f"Kubernetes service {key} has no port number at index {idx}?")
                        continue

                    if len(k8s_ep_ports) == 1:
                        # Just one endpoint port. Done.
                        k8s_target = list(k8s_ep_ports.values())[0]
                        target_ports[src_port] = k8s_target

                        self.logger.debug(f'{key} port {src_port}: single endpoint port {k8s_target}')
                        continue

                    # Hmmm, we need to try to actually map whatever ports are listed for
                    # this service. Oh well.

                    found_key = False
                    fallback: Optional[int] = None

                    for attr in [ 'targetPort', 'name', 'port' ]:
                        port_key = port.get(attr)   # This could be a name or a number, in general.

                        if port_key:
                            found_key = True

                            if not fallback and (port_key != 'name') and str(port_key).isdigit():
                                # fallback can only be digits.
                                fallback = port_key

                            # Do we have a destination port for this?
                            k8s_target = k8s_ep_ports.get(str(port_key), None)

                            if k8s_target:
                                self.logger.debug(f'{key} port {src_port} #{idx}: {attr} {port_key} -> {k8s_target}')
                                break
                            else:
                                self.logger.debug(f'{key} port {src_port} #{idx}: {attr} {port_key} -> miss')

                    if not found_key:
                        # WTFO. This is impossible.
                        self.logger.error(f"Kubernetes service {key} port {src_port} has an empty port spec at index {idx}?")
                        continue

                    if not k8s_target:
                        # This is most likely because we don't have endpoint info at all, so we'll do service
                        # routing.
                        #
                        # It's actually impossible for fallback to be unset, but WTF.
                        k8s_target = fallback or src_port

                        self.logger.debug(f'{key} port {src_port} #{idx}: falling back to {k8s_target}')

                    target_ports[src_port] = k8s_target

                if not target_ports:
                    # WTFO. This is impossible. I guess we'll fall back to service routing.
                    self.logger.error(f"Kubernetes service {key} has no routable ports at all?")

                # OK. Once _that's_ done we have to take the endpoint addresses into
                # account, or just use the service name if we don't have that.

                k8s_ep_addrs = k8s_ep.get('addresses', None)

                if k8s_ep_addrs:
                    for addr in k8s_ep_addrs:
                        ip = addr.get('ip', None)

                        if ip:
                            target_addrs.append(ip)

            # OK! If we have no target addresses, just use service routing.
            if not target_addrs:
                self.logger.debug(f'{key} falling back to service routing')
                target_addrs = [ key ]

            for src_port, target_port in target_ports.items():
                svc_endpoints[src_port] = [ {
                    'ip': target_addr,
                    'port': target_port
                } for target_addr in target_addrs ]

            # Nope. Set this up for service routing.
            self.services[f'k8s-{k8s_name}-{k8s_namespace}'] = {
                'apiVersion': 'ambassador/v1',
                'ambassador_id': Config.ambassador_id,
                'kind': 'Service',
                'name': k8s_name,
                'namespace': k8s_namespace,
                'endpoints': svc_endpoints
            }

        # OK. After all that, go turn all of the things in self.services into Ambassador
        # Service resources.

        for key, svc in self.services.items():
            serialization = dump_yaml(svc, default_flow_style=False)

            r = ACResource.from_dict(key, key, serialization, svc)
            self.elements.append(r)

        # NOTE: od is only consumed by the commented-out debug dump below.
        od = {
            'elements': [ x.as_dict() for x in self.elements ],
            'k8s_endpoints': self.k8s_endpoints,
            'k8s_services': self.k8s_services,
            'services': self.services
        }

        # self.logger.debug("==== FINALIZE END\n%s" % json.dumps(od, sort_keys=True, indent=4))
| 41.332924 | 286 | 0.575895 | from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
import json
import logging
import os
import yaml
from .config import Config
from .acresource import ACResource
from ..utils import parse_yaml, dump_yaml
AnyDict = Dict[str, Any]
HandlerResult = Optional[Tuple[str, List[AnyDict]]]
# resources, we'll get a full set. The whole 'secret loader' thing needs to have the
# recursive, I don't care) directory full of Ambassador YAML, including TLSSecrets
# send over K8s Service records, and anything annotated in there will end up in
# elements. This may include TLSSecrets or Endpoints. Any TLSSecret mentioned that
# isn't already in elements will need to be fetched.
# it. The ambassador_id is completely orthogonal to the namespace. No element with
# the wrong ambassador_id will end up in elements. It would be nice if they were
# never sent by kubewatch, but, well, y'know.
class ResourceFetcher:
def __init__(self, logger: logging.Logger, aconf: 'Config') -> None:
self.aconf = aconf
self.logger = logger
self.elements: List[ACResource] = []
self.filename: Optional[str] = None
self.ocount: int = 1
self.saved: List[Tuple[Optional[str], int]] = []
self.k8s_endpoints: Dict[str, AnyDict] = {}
self.k8s_services: Dict[str, AnyDict] = {}
self.services: Dict[str, AnyDict] = {}
@property
def location(self):
return "%s.%d" % (self.filename or "anonymous YAML", self.ocount)
def push_location(self, filename: Optional[str], ocount: int) -> None:
self.saved.append((self.filename, self.ocount))
self.filename = filename
self.ocount = ocount
def pop_location(self) -> None:
self.filename, self.ocount = self.saved.pop()
def load_from_filesystem(self, config_dir_path, recurse: bool=False, k8s: bool=False):
inputs: List[Tuple[str, str]] = []
if os.path.isdir(config_dir_path):
dirs = [ config_dir_path ]
while dirs:
dirpath = dirs.pop(0)
for filename in os.listdir(dirpath):
filepath = os.path.join(dirpath, filename)
if recurse and os.path.isdir(filepath):
dirs.append(filepath)
continue
if not os.path.isfile(filepath):
continue
if not filename.lower().endswith('.yaml'):
continue
inputs.append((filepath, filename))
else:
inputs.append((config_dir_path, os.path.basename(config_dir_path)))
for filepath, filename in inputs:
self.logger.info("reading %s (%s)" % (filename, filepath))
try:
serialization = open(filepath, "r").read()
self.parse_yaml(serialization, k8s=k8s, filename=filename)
except IOError as e:
self.aconf.post_error("could not read YAML from %s: %s" % (filepath, e))
self.finalize()
def parse_yaml(self, serialization: str, k8s=False, rkey: Optional[str]=None,
filename: Optional[str]=None) -> None:
try:
objects = parse_yaml(serialization)
self.parse_object(objects=objects, k8s=k8s, rkey=rkey, filename=filename)
except yaml.error.YAMLError as e:
self.aconf.post_error("%s: could not parse YAML: %s" % (self.location, e))
self.finalize()
def parse_json(self, serialization: str, k8s=False, rkey: Optional[str]=None,
filename: Optional[str]=None) -> None:
try:
objects = json.loads(serialization)
self.parse_object(objects=objects, k8s=k8s, rkey=rkey, filename=filename)
except json.decoder.JSONDecodeError as e:
self.aconf.post_error("%s: could not parse YAML: %s" % (self.location, e))
self.finalize()
def parse_watt(self, serialization: str) -> None:
basedir = os.environ.get('AMBASSADOR_CONFIG_BASE_DIR', '/ambassador')
if os.path.isfile(os.path.join(basedir, '.ambassador_ignore_crds')):
self.aconf.post_error("Ambassador could not find core CRD definitions. Please visit https://www.getambassador.io/reference/core/crds/ for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...")
if os.path.isfile(os.path.join(basedir, '.ambassador_ignore_crds_2')):
self.aconf.post_error("Ambassador could not find Resolver type CRD definitions. Please visit https://www.getambassador.io/reference/core/crds/ for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...")
try:
watt_dict = json.loads(serialization)
watt_k8s = watt_dict.get('Kubernetes', {})
for key in [ 'service', 'endpoints', 'secret' ]:
for obj in watt_k8s.get(key) or []:
self.handle_k8s(obj)
for key in [ 'AuthService', 'ConsulResolver',
'KubernetesEndpointResolver', 'KubernetesServiceResolver',
'Mapping', 'Module', 'RateLimitService',
'TCPMapping', 'TLSContext', 'TracingService']:
for obj in watt_k8s.get(key) or []:
self.handle_k8s_crd(obj)
watt_consul = watt_dict.get('Consul', {})
consul_endpoints = watt_consul.get('Endpoints', {})
for consul_rkey, consul_object in consul_endpoints.items():
result = self.handle_consul_service(consul_rkey, consul_object)
if result:
rkey, parsed_objects = result
self.parse_object(parsed_objects, k8s=False,
filename=self.filename, rkey=rkey)
except json.decoder.JSONDecodeError as e:
self.aconf.post_error("%s: could not parse WATT: %s" % (self.location, e))
self.finalize()
def handle_k8s(self, obj: dict) -> None:
kind = obj.get('kind')
if not kind:
return
handler_name = f'handle_k8s_{kind.lower()}'
handler = getattr(self, handler_name, None)
if not handler:
return
result = handler(obj)
if result:
rkey, parsed_objects = result
self.parse_object(parsed_objects, k8s=False,
filename=self.filename, rkey=rkey)
def handle_k8s_crd(self, obj: dict) -> None:
kind = obj.get('kind')
if not kind:
self.logger.debug("%s: ignoring K8s CRD, no kind" % self.location)
return
apiVersion = obj.get('apiVersion')
metadata = obj.get('metadata') or {}
name = metadata.get('name')
namespace = metadata.get('namespace') or 'default'
spec = obj.get('spec') or {}
if not name:
self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD, no name')
return
if not apiVersion:
self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD {name}: no apiVersion')
return
resource_identifier = f'{name}.{namespace}'
amb_object = dict(spec)
amb_object['apiVersion'] = apiVersion
amb_object['name'] = name
amb_object['kind'] = kind
self.parse_object([ amb_object ], k8s=False, filename=self.filename, rkey=resource_identifier)
def parse_object(self, objects, k8s=False, rkey: Optional[str]=None, filename: Optional[str]=None):
    """Feed a batch of already-deserialized objects into the fetcher.

    With ``k8s=True`` each object is routed through the K8s dispatcher;
    otherwise it is processed directly as an Ambassador resource and
    counted toward ocount. Location tracking brackets the whole batch.
    """
    self.push_location(filename, 1)

    for candidate in objects:
        self.logger.debug("PARSE_OBJECT: checking %s" % candidate)

        if k8s:
            self.handle_k8s(candidate)
        else:
            self.process_object(candidate, rkey=rkey)
            self.ocount += 1

    self.pop_location()
def process_object(self, obj: dict, rkey: Optional[str]=None) -> None:
    """Validate one deserialized object and store it as an ACResource.

    Non-dict and kind-less inputs are reported via ``aconf.post_error``
    and dropped; objects meant for a different ambassador_id are silently
    skipped. ``Pragma`` objects are interpreted in place (they may
    redirect ``self.filename``) and are neither counted nor stored.
    """
    if not isinstance(obj, dict):
        if not obj:
            self.aconf.post_error("%s is empty" % self.location)
        else:
            # BUGFIX: sort_keys was the integer 4 (a truthy typo); use True
            # to match the identical json.dumps call below.
            self.aconf.post_error("%s is not a dictionary? %s" %
                                  (self.location, json.dumps(obj, indent=4, sort_keys=True)))
        return

    if not self.aconf.good_ambassador_id(obj):
        # Meant for some other Ambassador instance; not an error.
        return

    if 'kind' not in obj:
        self.aconf.post_error("%s is missing 'kind'?? %s" %
                              (self.location, json.dumps(obj, indent=4, sort_keys=True)))
        return

    if obj['kind'] == 'Pragma':
        # A Pragma can permanently rename the next input source
        # (or the next Pragma).
        new_source = obj.get('source', None)

        if new_source:
            self.filename = new_source

        # Don't count Pragma objects, since the user generally doesn't write them.
        self.ocount -= 1
        return

    if not rkey:
        rkey = self.filename

    rkey = "%s.%d" % (rkey, self.ocount)

    # Fine. Fine fine fine.
    serialization = dump_yaml(obj, default_flow_style=False)

    r = ACResource.from_dict(rkey, rkey, serialization, obj)
    self.elements.append(r)
def sorted(self, key=lambda x: x.rkey):
    """Return the stored elements as a new list ordered by *key* (rkey by default)."""
    # The method name shadows the builtin, so qualify it explicitly.
    import builtins
    return builtins.sorted(self.elements, key=key)
def handle_k8s_endpoints(self, k8s_object: AnyDict) -> HandlerResult:
    """Stash a K8s Endpoints resource for later endpoint resolution.

    Nothing is returned to the caller: usable subsets are recorded in
    ``self.k8s_endpoints`` (keyed by "name.namespace") so finalize() can
    join them with their Service. Endpoints are ignored entirely unless
    endpoint routing is enabled, and subsets with no usable addresses or
    no routable TCP ports are skipped.
    """
    # Don't include Endpoints unless endpoint routing is enabled.
    if not Config.enable_endpoints:
        return None

    metadata = k8s_object.get('metadata', None)
    resource_name = metadata.get('name') if metadata else None
    resource_namespace = metadata.get('namespace', 'default') if metadata else None
    resource_subsets = k8s_object.get('subsets', None)

    # Run every sanity check (rather than bailing at the first failure)
    # so all applicable complaints get logged.
    skip = False

    if not metadata:
        self.logger.debug("ignoring K8s Endpoints with no metadata")
        skip = True

    if not resource_name:
        self.logger.debug("ignoring K8s Endpoints with no name")
        skip = True

    if not resource_subsets:
        self.logger.debug(f"ignoring K8s Endpoints {resource_name}.{resource_namespace} with no subsets")
        skip = True

    if skip:
        return None

    resource_identifier = '{name}.{namespace}'.format(namespace=resource_namespace, name=resource_name)

    # In Kubernetes-speak, the Endpoints resource carries a set of
    # "subsets"; collapse each into address dicts plus a port lookup map,
    # as arrays of tuples, and stash them for later.
    for subset in resource_subsets:
        addresses = []

        for address in subset.get('addresses', []):
            addr = {}

            # Copy over whatever identifying info this entry carries.
            for src_key, dst_key in (('ip', 'ip'), ('nodeName', 'node')):
                value = address.get(src_key, None)
                if value is not None:
                    addr[dst_key] = value

            target_ref = address.get('targetRef', None)
            if target_ref is not None:
                for src_key, dst_key in (('kind', 'target_kind'),
                                         ('name', 'target_name'),
                                         ('namespace', 'target_namespace')):
                    value = target_ref.get(src_key, None)
                    if value is not None:
                        addr[dst_key] = value

            if addr:
                addresses.append(addr)

        if not addresses:
            # No address info at all in this subset: nothing to route to.
            continue

        # A service can reference a port either by name or by port number,
        # so make every port reachable both ways.
        port_dict = {}

        for port in subset.get('ports', []):
            if port.get('protocol', 'TCP').upper() != 'TCP':
                continue

            port_number = port.get('port', None)
            if port_number is None:
                # WTFO.
                continue

            port_dict[str(port_number)] = port_number

            port_name = port.get('name', None)
            if port_name:
                port_dict[port_name] = port_number

        if port_dict:
            # We're not going to actually return this: we'll just stash it
            # for our later resolution pass.
            self.k8s_endpoints[resource_identifier] = {
                'name': resource_name,
                'namespace': resource_namespace,
                'addresses': addresses,
                'ports': port_dict
            }
        else:
            self.logger.debug(f"ignoring K8s Endpoints {resource_identifier} with no routable ports")

    return None
def handle_k8s_service(self, k8s_object: AnyDict) -> HandlerResult:
    """Record a K8s Service for endpoint resolution and mine its annotations.

    The Service's ports are stashed in ``self.k8s_services`` so finalize()
    can pair them with Endpoints; any ``getambassador.io/config``
    annotation is parsed as YAML and returned as Ambassador objects.
    """
    # The annoying bit about K8s Service resources is that not only do we
    # have to look inside them for Ambassador resources, but we also have
    # to save their info for later endpoint resolution too. Again, we're
    # trusting that the input isn't overly bloated on that latter bit.
    metadata = k8s_object.get('metadata', None)
    resource_name = metadata.get('name') if metadata else None
    resource_namespace = metadata.get('namespace', 'default') if metadata else None

    annotations = metadata.get('annotations', None) if metadata else None
    if annotations:
        annotations = annotations.get('getambassador.io/config', None)

    # The original accumulated a short-circuited skip flag; plain early
    # returns are equivalent here.
    if not metadata:
        self.logger.debug("ignoring K8s Service with no metadata")
        return None

    if not resource_name:
        self.logger.debug("ignoring K8s Service with no name")
        return None

    if Config.single_namespace and (resource_namespace != Config.ambassador_namespace):
        # This should never happen in actual usage, since we shouldn't be
        # given things in the wrong namespace.
        self.logger.debug(f"ignoring K8s Service {resource_name}.{resource_namespace} in wrong namespace")
        return None

    resource_identifier = f'{resource_name}.{resource_namespace}'

    spec = k8s_object.get('spec', None)
    ports = spec.get('ports', None) if spec else None

    if spec and ports:
        self.k8s_services[resource_identifier] = {
            'name': resource_name,
            'namespace': resource_namespace,
            'ports': ports
        }
    else:
        self.logger.debug(f"not saving K8s Service {resource_name}.{resource_namespace} with no ports")

    objects: List[Any] = []

    if annotations:
        # Tag the filename so resources extracted from the annotation are
        # distinguishable from ones read straight out of a file.
        if (self.filename is not None) and (not self.filename.endswith(":annotation")):
            self.filename += ":annotation"

        try:
            objects = parse_yaml(annotations)
        except yaml.error.YAMLError as e:
            self.logger.debug("could not parse YAML: %s" % e)

    return resource_identifier, objects
def handle_k8s_secret(self, k8s_object: AnyDict) -> HandlerResult:
    """Turn a usable K8s Secret into an Ambassador Secret resource.

    Only ``kubernetes.io/tls`` and ``Opaque`` secrets carrying at least
    one of ``tls.crt``/``tls.key`` are accepted; everything else is
    logged at debug level and dropped.
    """
    secret_type = k8s_object.get('type', None)
    metadata = k8s_object.get('metadata', None)
    resource_name = metadata.get('name') if metadata else None
    resource_namespace = metadata.get('namespace', 'default') if metadata else None
    data = k8s_object.get('data', None)

    # These checks deliberately do NOT short-circuit, so every applicable
    # complaint gets logged for one bad Secret.
    skip = False

    if (secret_type != 'kubernetes.io/tls') and (secret_type != 'Opaque'):
        self.logger.debug("ignoring K8s Secret with unknown type %s" % secret_type)
        skip = True

    if not data:
        self.logger.debug("ignoring K8s Secret with no data")
        skip = True

    if not metadata:
        self.logger.debug("ignoring K8s Secret with no metadata")
        skip = True

    if not resource_name:
        self.logger.debug("ignoring K8s Secret with no name")
        skip = True

    if not skip and (Config.single_namespace and (resource_namespace != Config.ambassador_namespace)):
        # This should never happen in actual usage, since we shouldn't be
        # given things in the wrong namespace.
        self.logger.debug("ignoring K8s Secret in wrong namespace")
        skip = True

    if skip:
        return None

    resource_identifier = f'{resource_name}.{resource_namespace}'

    tls_crt = data.get('tls.crt', None)
    tls_key = data.get('tls.key', None)

    if not tls_crt and not tls_key:
        self.logger.debug(f'ignoring K8s Secret {resource_identifier} with no keys')
        return None

    secret_info = {
        'apiVersion': 'ambassador/v1',
        'ambassador_id': Config.ambassador_id,
        'kind': 'Secret',
        'name': resource_name,
        'namespace': resource_namespace
    }

    # Only attach the material that is actually present.
    for field, value in (('tls_crt', tls_crt), ('tls_key', tls_key)):
        if value:
            secret_info[field] = value

    return resource_identifier, [ secret_info ]
def handle_consul_service(self,
                          consul_rkey: str, consul_object: AnyDict) -> HandlerResult:
    """Convert a Consul discovery record into an Ambassador Service.

    The resulting Service (with its endpoints gathered under the '*'
    wildcard port) is stored in ``self.services`` under the key
    "consul-<name>-<datacenter>"; nothing is returned to the caller.
    """
    endpoints = consul_object.get('Endpoints', [])
    name = consul_object.get('Service', consul_rkey)

    if len(endpoints) < 1:
        self.logger.debug(f"ignoring Consul service {name} with no Endpoints")
        return None

    # 'Id' holds the datacenter; default to 'dc1' when it's absent.
    svc = {
        'apiVersion': 'ambassador/v1',
        'ambassador_id': Config.ambassador_id,
        'kind': 'Service',
        'name': name,
        'datacenter': consul_object.get('Id') or 'dc1',
        'endpoints': {}
    }

    for ep in endpoints:
        ep_addr = ep.get('Address')
        ep_port = ep.get('Port')

        if not ep_addr or not ep_port:
            self.logger.debug(f"ignoring Consul service {name} endpoint {ep['ID']} missing address info")
            continue

        # Consul services don't have the weird indirections that Kube
        # services do, so every endpoint lands under the wildcard port.
        svc['endpoints'].setdefault('*', []).append({
            'ip': ep_addr,
            'port': ep_port,
            'target_kind': 'Consul'
        })

    self.services[f"consul-{name}-{svc['datacenter']}"] = svc

    return None
def finalize(self) -> None:
    """Join saved K8s Services with their Endpoints and emit Service resources.

    Every entry in ``self.k8s_services`` is matched against
    ``self.k8s_endpoints``, a Kube-style port mapping is worked out, and
    the result is added to ``self.services``; then every accumulated
    service (K8s and Consul alike) becomes an ACResource in
    ``self.elements``.
    """
    # The point here is to sort out self.k8s_services and self.k8s_endpoints
    # and turn them into proper Ambassador Service resources. This is a bit
    # annoying, because of the annoyances of Kubernetes, but we'll give it a go.

    # Debug snapshot of the pre-finalize state. Kept for parity with the
    # matching snapshot at the end; not otherwise used by the visible code.
    od = {
        'elements': [ x.as_dict() for x in self.elements ],
        'k8s_endpoints': self.k8s_endpoints,
        'k8s_services': self.k8s_services,
        'services': self.services
    }

    for key, k8s_svc in self.k8s_services.items():
        k8s_ep = self.k8s_endpoints.get(key, None)
        k8s_ep_ports = k8s_ep.get('ports', None) if k8s_ep else None

        k8s_name = k8s_svc['name']
        k8s_namespace = k8s_svc['namespace']

        # How does Kube decide which target port to use?
        #
        # First, if there's only one endpoint port, that's the one that gets
        # used. If there's more than one, if the Service's port entry has a
        # targetPort number, it uses that. Otherwise it tries to find an
        # endpoint port with the same name as the service port. Otherwise,
        # I dunno, it punts and uses the service-port.
        #
        # So that's how Ambassador is going to do it, for each Service port
        # entry.
        target_ports = {}
        target_addrs = []
        svc_endpoints = {}

        if not k8s_ep or not k8s_ep_ports:
            # No endpoints at all, so we're done with this service.
            self.logger.debug(f'{key}: no endpoints at all')
        else:
            idx = -1

            for port in k8s_svc['ports']:
                idx += 1

                k8s_target: Optional[int] = None

                src_port = port.get('port', None)

                if not src_port:
                    self.logger.error(f"Kubernetes service {key} has no port number at index {idx}?")
                    continue

                if len(k8s_ep_ports) == 1:
                    # Just one endpoint port? Use it.
                    k8s_target = list(k8s_ep_ports.values())[0]
                    target_ports[src_port] = k8s_target
                    self.logger.debug(f'{key} port {src_port}: single endpoint port {k8s_target}')
                    continue

                found_key = False
                fallback: Optional[int] = None

                for attr in [ 'targetPort', 'name', 'port' ]:
                    port_key = port.get(attr)

                    if port_key:
                        found_key = True

                        # The fallback may only ever be a numeric key.
                        if not fallback and (port_key != 'name') and str(port_key).isdigit():
                            fallback = port_key

                        k8s_target = k8s_ep_ports.get(str(port_key), None)

                        if k8s_target:
                            self.logger.debug(f'{key} port {src_port} #{idx}: {attr} {port_key} -> {k8s_target}')
                            break
                        else:
                            self.logger.debug(f'{key} port {src_port} #{idx}: {attr} {port_key} -> miss')

                if not found_key:
                    self.logger.error(f"Kubernetes service {key} port {src_port} has an empty port spec at index {idx}?")
                    continue

                if not k8s_target:
                    k8s_target = fallback or src_port
                    # BUGFIX: this debug call was truncated mid-f-string in
                    # the original; reconstructed as a complete statement.
                    self.logger.debug(f'{key} port {src_port} #{idx}: falling back to {k8s_target}')

                target_ports[src_port] = k8s_target

            if not target_ports:
                # WTFO. This is impossible. I guess we'll fall back to service routing.
                self.logger.error(f"Kubernetes service {key} has no routable ports at all?")

            # Take the endpoint addresses into account, or just use the
            # service name if we don't have that.
            k8s_ep_addrs = k8s_ep.get('addresses', None)

            if k8s_ep_addrs:
                for addr in k8s_ep_addrs:
                    ip = addr.get('ip', None)
                    if ip:
                        target_addrs.append(ip)

        if not target_addrs:
            self.logger.debug(f'{key} falling back to service routing')
            target_addrs = [ key ]

        for src_port, target_port in target_ports.items():
            svc_endpoints[src_port] = [ {
                'ip': target_addr,
                'port': target_port
            } for target_addr in target_addrs ]

        self.services[f'k8s-{k8s_name}-{k8s_namespace}'] = {
            'apiVersion': 'ambassador/v1',
            'ambassador_id': Config.ambassador_id,
            'kind': 'Service',
            'name': k8s_name,
            'namespace': k8s_namespace,
            'endpoints': svc_endpoints
        }

    # Turn every accumulated service into a stored ACResource.
    for key, svc in self.services.items():
        serialization = dump_yaml(svc, default_flow_style=False)

        r = ACResource.from_dict(key, key, serialization, svc)
        self.elements.append(r)

    # Matching debug snapshot of the post-finalize state (not otherwise used).
    od = {
        'elements': [ x.as_dict() for x in self.elements ],
        'k8s_endpoints': self.k8s_endpoints,
        'k8s_services': self.k8s_services,
        'services': self.services
    }
| true | true |
f726c946b28663f6039c3f5e99cf5aef3b6ee900 | 302 | py | Python | pyexcel/sheets/__init__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | pyexcel/sheets/__init__.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | pyexcel/sheets/__init__.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | """
pyexcel.sheets
~~~~~~~~~~~~~~~~~~~
Core functionality of pyexcel, data model
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
# flake8: noqa
from .sheet import Sheet
from .matrix import Matrix, transpose, Row, Column
| 23.230769 | 59 | 0.652318 |
from .sheet import Sheet
from .matrix import Matrix, transpose, Row, Column
| true | true |
f726caa23a352acc6478111d0682e07e79f4858c | 860 | py | Python | route/main_file.py | LiteHell/openNAMU | 72bd1ec1d4b63bd74876bf1c4bb3c4002ec9fdd1 | [
"BSD-3-Clause"
] | null | null | null | route/main_file.py | LiteHell/openNAMU | 72bd1ec1d4b63bd74876bf1c4bb3c4002ec9fdd1 | [
"BSD-3-Clause"
] | null | null | null | route/main_file.py | LiteHell/openNAMU | 72bd1ec1d4b63bd74876bf1c4bb3c4002ec9fdd1 | [
"BSD-3-Clause"
] | null | null | null | from .tool.func import *
from . import main_error_404
def main_file_2(conn, data):
    """Serve one of the special static files (easter egg, robots.txt,
    sitemap.xml, other *.txt), falling back to the 404 handler.

    :param conn: database connection (passed through to the 404 handler).
    :param data: requested file name.
    """
    # BUGFIX: the original created an unused cursor and opened the easter
    # egg file without ever closing it (leaked handle); the regex was also
    # not a raw string.
    if data == 'easter_egg.html':
        with open('./views/main_css/file/easter_egg.html', 'r') as f:
            page = f.read()

        return easy_minify(flask.render_template(skin_check(),
            imp = ['easter_egg.html', wiki_set(), custom(), other2([0, 0])],
            data = page,
            menu = 0
        ))
    elif re.search(r'\.txt$', data) or data == 'sitemap.xml':
        if data == 'robots.txt' and not os.path.exists('robots.txt'):
            # No robots.txt on disk: serve a default policy inline.
            return flask.Response('User-agent: *\nDisallow: /\nAllow: /$\nAllow: /w/', mimetype='text/plain')
        elif os.path.exists(data):
            return flask.send_from_directory('./', data)
        else:
            return main_error_404.main_error_404_2(conn)
    else:
        return main_error_404.main_error_404_2(conn)
from . import main_error_404
def main_file_2(conn, data):
curs = conn.cursor()
if data == 'easter_egg.html':
return easy_minify(flask.render_template(skin_check(),
imp = ['easter_egg.html', wiki_set(), custom(), other2([0, 0])],
data = open('./views/main_css/file/easter_egg.html', 'r').read(),
menu = 0
))
elif re.search('\.txt$', data) or data == 'sitemap.xml':
if data == 'robots.txt' and not os.path.exists('robots.txt'):
return flask.Response('User-agent: *\nDisallow: /\nAllow: /$\nAllow: /w/', mimetype='text/plain')
elif os.path.exists(data):
return flask.send_from_directory('./', data)
else:
return main_error_404.main_error_404_2(conn)
else:
return main_error_404.main_error_404_2(conn) | true | true |
f726cb978618b466b6e7a32b01c4590b62b5c7a6 | 3,077 | py | Python | commons/__init__.py | oeg-upm/ttla | ab1cc5a2777b3d4fb905f4452379f469153c904b | [
"Apache-2.0"
] | null | null | null | commons/__init__.py | oeg-upm/ttla | ab1cc5a2777b3d4fb905f4452379f469153c904b | [
"Apache-2.0"
] | 5 | 2019-04-03T12:58:29.000Z | 2021-06-02T00:18:34.000Z | commons/__init__.py | oeg-upm/bob | ab1cc5a2777b3d4fb905f4452379f469153c904b | [
"Apache-2.0"
] | null | null | null | import os
import pandas as pd
from easysparql import *
ENDPOINT = "https://dbpedia.org/sparql"
MIN_NUM_OF_ENT_PER_PROP = 30 # the minimum number of entities per property (get_properties)
QUERY_LIMIT = "" # At the moment, we do not put any limit on the number of results
MIN_NUM_NUMS = 30 # The minimum number of values that will be annotated, this is to ignore small size
proj_path = (os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
data_dir = os.path.join(proj_path, 'data')
meta_dir = os.path.join(proj_path, 'meta')
models_dir = os.path.join(proj_path, 'local_models')
log_dir = os.path.join(proj_path, 'local_logs')
# kinds
NOMINAL = "nominal"
ORDINAL = "ordinal"
RATIO_INTERVAL = "ratio-interval"
# sub kinds
CATEGORICAL = "categorical"
SEQUENTIAL = "sequential"
HIERARCHICAL = "hierarchical"
RANDOM = "random"
COUNTS = "count"
OTHER = "other"
YEAR = "year"
# I am not sure of the below is useful
# kinds and subkinds
KINDS = {
ORDINAL: [],
NOMINAL: [CATEGORICAL, SEQUENTIAL, HIERARCHICAL, RANDOM],
RATIO_INTERVAL: [COUNTS, OTHER],
YEAR: []
}
def get_column_from_meta(fname, column_id):
    """Return one column of data/T2Dv2/<fname>.csv as a plain list.

    :param fname: base name of the CSV file (without the .csv extension).
    :param column_id: zero-based positional index of the wanted column.
    :return: the column's cells as a Python list.
    """
    csv_path = os.path.join(data_dir, 'T2Dv2', fname + ".csv")
    frame = pd.read_csv(csv_path)
    wanted = frame.columns.values[column_id]
    return list(frame[wanted])
def t2dv2_columns_of_kind(num_kind, sub_kind=None):
    """Return the rows of the T2Dv2 typology matching a kind (and,
    optionally, a sub kind).

    :param num_kind: nominal, ordinal, ratio-interval
    :param sub_kind: optional sub kind to additionally filter on.
    :return: a dataframe of the matching rows (also printed, as before).
    """
    meta_file_dir = os.path.join(meta_dir, 'T2Dv2_typology.csv')
    df = pd.read_csv(meta_file_dir)
    if sub_kind is None:
        dfkind = df[df.kind == num_kind]
    else:
        # BUGFIX: the original used `and` between two boolean Series, which
        # raises "truth value of a Series is ambiguous"; element-wise
        # conjunction needs `&` with parenthesized operands.
        dfkind = df[(df.kind == num_kind) & (df.sub_kind == sub_kind)]
    print(dfkind)
    return dfkind
def get_numerics_from_list(nums_str_list):
    """Coerce a mixed list of strings and numbers to numbers.

    :param nums_str_list: list of strings or numbers or a mix.
    :return: the parseable numbers, or None when fewer than half of the
        input cells could be parsed.
    """
    parsed = (get_num(cell) for cell in nums_str_list)
    nums = [value for value in parsed if value is not None]

    if len(nums) < len(nums_str_list) / 2:
        return None
    return nums
def get_num(num_or_str):
    """Best-effort conversion of a cell to a number.

    :param num_or_str: a number, a string possibly holding a number
        (thousands separated by commas), or anything else.
    :return: the numeric value, or None if it is not a number.
    """
    if pd.isna(num_or_str):
        return None
    elif isinstance(num_or_str, (int, float)):
        return num_or_str
    elif isinstance(num_or_str, str):
        # BUGFIX: the original tested `basestring`, which only exists in
        # Python 2 -- under Python 3 every string input raised NameError.
        if '.' in num_or_str or ',' in num_or_str or num_or_str.isdigit():
            try:
                # Strip thousands separators before parsing.
                return float(num_or_str.replace(',', ''))
            except Exception:
                return None
    return None
def class_uri_to_fname(class_uri):
    """Flatten a class URI into a filesystem-safe name.

    Strips a leading http:// or https:// scheme, then replaces '/' with
    '__', removes ',' and turns '#' into '_'.

    :param class_uri: e.g. http://dbpedia.org/ontology/Person
    :return: the flattened name, e.g. dbpedia.org__ontology__Person
    """
    if class_uri[:7] == "http://":
        class_dname = class_uri[7:]
    elif class_uri[:8] == "https://":
        class_dname = class_uri[8:]
    else:
        # BUGFIX: URIs with any other (or no) scheme previously left
        # class_dname unbound and raised UnboundLocalError.
        class_dname = class_uri
    return class_dname.replace('/', '__').replace(',', '').replace('#', '_')
| 26.756522 | 102 | 0.646409 | import os
import pandas as pd
from easysparql import *
ENDPOINT = "https://dbpedia.org/sparql"
MIN_NUM_OF_ENT_PER_PROP = 30
QUERY_LIMIT = ""
MIN_NUM_NUMS = 30
proj_path = (os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
data_dir = os.path.join(proj_path, 'data')
meta_dir = os.path.join(proj_path, 'meta')
models_dir = os.path.join(proj_path, 'local_models')
log_dir = os.path.join(proj_path, 'local_logs')
NOMINAL = "nominal"
ORDINAL = "ordinal"
RATIO_INTERVAL = "ratio-interval"
CATEGORICAL = "categorical"
SEQUENTIAL = "sequential"
HIERARCHICAL = "hierarchical"
RANDOM = "random"
COUNTS = "count"
OTHER = "other"
YEAR = "year"
KINDS = {
ORDINAL: [],
NOMINAL: [CATEGORICAL, SEQUENTIAL, HIERARCHICAL, RANDOM],
RATIO_INTERVAL: [COUNTS, OTHER],
YEAR: []
}
def get_column_from_meta(fname, column_id):
fdir = os.path.join(data_dir, 'T2Dv2', fname+".csv")
df = pd.read_csv(fdir)
col_name = df.columns.values[column_id]
return list(df[col_name])
def t2dv2_columns_of_kind(num_kind, sub_kind=None):
meta_file_dir = os.path.join(meta_dir, 'T2Dv2_typology.csv')
df = pd.read_csv(meta_file_dir)
if sub_kind is None:
dfkind = df[df.kind == num_kind]
else:
dfkind = df[df.kind == num_kind and df.sub_kind == sub_kind]
print(dfkind)
return dfkind
def get_numerics_from_list(nums_str_list):
nums = []
for c in nums_str_list:
n = get_num(c)
if n is not None:
nums.append(n)
if len(nums) < len(nums_str_list)/2:
return None
return nums
def get_num(num_or_str):
if pd.isna(num_or_str):
return None
elif isinstance(num_or_str, (int, float)):
return num_or_str
elif isinstance(num_or_str, basestring):
if '.' in num_or_str or ',' in num_or_str or num_or_str.isdigit():
try:
return float(num_or_str.replace(',', ''))
except Exception as e:
return None
return None
def class_uri_to_fname(class_uri):
if class_uri[:7] == "http://":
class_dname = class_uri[7:]
elif class_uri[:8] == "https://":
class_dname = class_uri[8:]
class_fname = class_dname.replace('/', '__').replace(',', '').replace('#', '_')
return class_fname
| true | true |
f726cc74ea48e6d84a1083f2a64bd4568a8f55c4 | 8,841 | py | Python | tools/pkg/tmpl.py | kristoffer-paulsson/angelos | 2ec236770d6530884a8ad88505aab01183f752b4 | [
"MIT"
] | 8 | 2020-06-07T23:26:34.000Z | 2022-03-28T00:20:34.000Z | tools/pkg/tmpl.py | kristoffer-paulsson/angelos | 2ec236770d6530884a8ad88505aab01183f752b4 | [
"MIT"
] | 1 | 2019-12-24T22:06:02.000Z | 2020-07-12T19:18:57.000Z | tools/pkg/tmpl.py | kristoffer-paulsson/angelos | 2ec236770d6530884a8ad88505aab01183f752b4 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2018-2020 by Kristoffer Paulsson <kristoffer.paulsson@talenten.se>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Install file templates."""
import datetime
import os
import re
import shutil
from pathlib import Path
from .data import NAME_NIX, VERSION, LICENSE, URL, PERMS_DIR, PERMS_EXEC, PERMS_FILE, EXEC_PREFIX, DIR_ANGELOS, \
FILE_ENV, FILE_CONF, FILE_EXE, USERNAME, GROUPNAME, NAME_SERVICE, DIR_VAR, DIR_LOG, DIR_ETC, FILE_ADMINS, LINK_EXE, \
FILTER, EXEC_SUFFIX, AUTHOR, AUTHOR_EMAIL
RPM_SPEC = """
Name: {namenix}
Version: {version}
Release: {release}
Summary: A safe messaging system.
License: {license}
URL: {url}
Source1: angelos.service
Source2: env.json
Source3: config.json
BuildArch: x86_64
BuildRequires: bzip2-devel, expat-devel, gdbm-devel, ncurses-devel, openssl-devel, readline-devel, sqlite-devel,
BuildRequires: tk-devel, xz-devel, zlib-devel, libffi-devel
BuildRequires: systemd-rpm-macros /usr/bin/pathfix.py
Requires: bzip2-libs, expat, gdbm-libs, ncurses-libs, openssl-libs, readline, sqlite-libs, tk, xz-libs, zlib, libffi
AutoReqProv: no
%description
Ἄγγελος is a safe messenger system. Angelos means "Carrier of a divine message."
%prep
%build
%check
%install
mkdir %{{buildroot}}/opt -p
sudo mv /opt/angelos/ %{{buildroot}}/opt
install --directory %{{buildroot}}{diretc}
install --directory %{{buildroot}}{dirvar}
install --directory %{{buildroot}}{dirlog}
install -D -m 0644 %{{SOURCE1}} %{{buildroot}}%{{_unitdir}}/{nameservice}
install -D -m 0644 %{{SOURCE2}} %{{buildroot}}{fileenv}
install -D -m 0644 %{{SOURCE3}} %{{buildroot}}{fileconf}
pathfix.py -pni "%{{__python3}} %{{py3_shbang_opts}}" %{{buildroot}}/*
%clean
%pre
grep -q {groupname} /etc/group >/dev/null 2>&1 || groupadd {groupname}
id {username} >/dev/null 2>&1 || useradd {username} --system -g {groupname}
%post
%systemd_post {nameservice}
touch {fileadmins}
chown 600 {fileadmins}
chmod {username}:{groupname} {fileadmins}
ln -sf {fileexe} {linkexe}
%preun
%systemd_preun {nameservice}
rm {linkexe}
%postun
%systemd_postun {nameservice}
%changelog
%files
%attr(700, {username}, {groupname}) {dirvar}
%attr(700, {username}, {groupname}) {dirlog}
%{{_unitdir}}/{nameservice}
%config {fileenv}
%config {fileconf}
%defattr({permsfile}, {username}, {groupname}, {permsdir})
{files}
"""
def walk_files(path: str) -> str:
    """Build the %files manifest body for the RPM spec.

    Directories are listed bare; files under EXEC_PREFIX (or ending in
    EXEC_SUFFIX) get an %attr() line with executable permissions, while
    all other files are listed without attributes.
    """
    lines = []

    for current, _dirs, files in os.walk(str(Path(path))):
        lines.append(f"{current}\n")

        for name in files:
            full = os.path.join(current, name)

            if current.startswith(EXEC_PREFIX) or name.endswith(EXEC_SUFFIX):
                lines.append(f"%attr({PERMS_EXEC}, {USERNAME}, {GROUPNAME}) {full}\n")
            else:
                lines.append(f"{full}\n")

    return "".join(lines)
def filter_files(path: str, subs: list = None):
    """Delete files and directories under *path* matching any pattern.

    :param path: root directory to sweep.
    :param subs: regex fragments to match; defaults to the module-level
        FILTER list.
    """
    pattern = "|".join(subs if subs else FILTER)

    for root, _dirs, files in os.walk(path):
        for name in files:
            target = os.path.join(root, name)

            if re.search(pattern, target) and os.path.exists(target):
                try:
                    os.remove(target)
                    print("Deleted file:", target)
                except Exception as exc:
                    print(target, exc)

        if re.search(pattern, root) and os.path.exists(root):
            try:
                shutil.rmtree(root)
                print("Deleted directory:", root)
            except Exception as exc:
                print(root, exc)
def render_rpm_spec(release: int, full_path: bool=True) -> str:
    """Render the RPM spec file (angelos.spec).

    NOTE(review): *full_path* is currently unused in this renderer --
    confirm whether it should select short directory names the way the
    systemd renderer does, or be dropped.
    """
    return RPM_SPEC.format(
        namenix=NAME_NIX, version=VERSION, release=release,
        license=LICENSE, url=URL,
        dirangelos=DIR_ANGELOS, dirvar=DIR_VAR, diretc=DIR_ETC, dirlog=DIR_LOG,
        fileenv=FILE_ENV, fileconf=FILE_CONF, fileexe=FILE_EXE,
        linkexe=LINK_EXE, fileadmins=FILE_ADMINS,
        permsexec=PERMS_EXEC, permsfile=PERMS_FILE, permsdir=PERMS_DIR,
        username=USERNAME, groupname=GROUPNAME, nameservice=NAME_SERVICE,
        files=walk_files(DIR_ANGELOS)
    )
SYSTEMD_UNIT = """
[Unit]
Description = Run the Angelos server
After = network.target
[Service]
Type = forking
AmbientCapabilities = CAP_NET_BIND_SERVICE
ExecStart = {namenix} -d start
ExecStop = {namenix} -d stop
ExecReload = {namenix} -d restart
PIDFile = /tmp/angelos.pid
User = {username}
Group = {groupname}
StateDirectory = {service_dirvar}
LogsDirectory = {service_dirlog}
ConfigurationDirectory = {service_diretc}
KeyringMode = private
[Install]
WantedBy=default.target
"""
def render_systemd_unit(service_full_path: bool=True) -> str:
    """Render the systemd unit file (angelos.service).

    :param service_full_path: when True use the absolute state/log/config
        directories; otherwise pass the bare service name for each.
    """
    if service_full_path:
        state_dir, logs_dir, conf_dir = DIR_VAR, DIR_LOG, DIR_ETC
    else:
        state_dir = logs_dir = conf_dir = NAME_NIX

    return SYSTEMD_UNIT.format(
        namenix=NAME_NIX, username=USERNAME, groupname=GROUPNAME,
        service_dirvar=state_dir, service_dirlog=logs_dir, service_diretc=conf_dir
    )
def render_deb_name(release: int) -> str:
    """Render the Debian package name, e.g. "angelos_1.0-1_amd64"."""
    return f"{NAME_NIX}_{VERSION}-{release}_amd64"
DEB_CONTROL = """
Package: {namenix}
Version: {version}
Homepage: {url}
Depends: zlib1g, libncurses5, libgdbm6, libnss3, libssl1.1, libreadline7, libffi6, bzip2, libsqlite3-0
Architecture: amd64
Maintainer: {author} <{authoremail}>
Description: Ἄγγελος is a safe messenger system. Angelos means "Carrier of a divine message."
"""
def render_deb_control() -> str:
    """Render the Debian control file (debian/control)."""
    return DEB_CONTROL.format(namenix=NAME_NIX, version=VERSION, url=URL,
                              author=AUTHOR, authoremail=AUTHOR_EMAIL)
DEB_COPYRIGHT = """
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: {namenix}
Upstream-Contact: {author} <{authoremail}>
Source: {url}
Files: *
Copyright: 2018-2020, {author} <{authoremail}>
License: MIT
"""
def render_deb_copyright() -> str:
    """Render the Debian copyright file (debian/copyright)."""
    return DEB_COPYRIGHT.format(namenix=NAME_NIX, url=URL,
                                author=AUTHOR, authoremail=AUTHOR_EMAIL)
DEB_CHANGELOG = """
{namenix} ({version}) testing; urgency=medium
* Initial release.
-- {author} <{authoremail}> {timestamp}
"""
def render_deb_changelog() -> str:
    """Render the Debian changelog file (debian/changelog).

    The timestamp is the current local time in Debian changelog format,
    e.g. "Mon, 01 Jan 2020 12:00:00 +0100".
    """
    # Simplified from a doubly-nested datetime.now() dance: taking UTC now
    # and converting with astimezone() yields the same aware local time.
    now_local = datetime.datetime.now(datetime.timezone.utc).astimezone()
    return DEB_CHANGELOG.format(
        namenix=NAME_NIX, version=VERSION, author=AUTHOR, authoremail=AUTHOR_EMAIL,
        timestamp=now_local.strftime("%a, %d %b %Y %X %z"),
    )
DEB_RULES = """
#!/usr/bin/make -f
#DH_VERBOSE = 1
#export DEB_BUILD_MAINT_OPTIONS = hardening=+all
#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic
#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed
%:
dh $@
"""
def render_deb_rules() -> str:
    """Render the Debian rules file (debian/rules)."""
    return DEB_RULES.format()
DEB_COMPAT = """
10
"""
def render_deb_compat() -> str:
    """Render the debhelper compat level file (debian/compat)."""
    return DEB_COMPAT.format()
DEB_CONFFILES = """
"""
def render_deb_conffiles() -> str:
    """Render the (currently empty) conffiles file (debian/conffiles)."""
    return DEB_CONFFILES.format()
DEB_DIRS = """
etc/angelos
var/lib/angelos
var/log/angelos
"""
def render_deb_dirs() -> str:
    """Render the dirs file (debian/dirs)."""
    return DEB_DIRS.format()
DEB_LINKS = """
{fileexe} {linkexe}
"""
def render_deb_links() -> str:
    """Render the links file (debian/angelos.links)."""
    return DEB_LINKS.format(fileexe=FILE_EXE, linkexe=LINK_EXE)
ENV_JSON = """{{}}"""
def render_env_json() -> str:
    """Render the env configuration file (env.json) -- an empty JSON object."""
    return ENV_JSON.format()
CONFIG_JSON = """{{}}"""
def render_config_json() -> str:
    """Render the config configuration file (config.json) -- an empty JSON object."""
    return CONFIG_JSON.format()
ADMINS_PUB = """"""
def render_admins_pub() -> str:
    """Render the (currently empty) admins public key file (admins.pub)."""
    return ADMINS_PUB.format()
| 26.54955 | 121 | 0.679222 |
import datetime
import os
import re
import shutil
from pathlib import Path
from .data import NAME_NIX, VERSION, LICENSE, URL, PERMS_DIR, PERMS_EXEC, PERMS_FILE, EXEC_PREFIX, DIR_ANGELOS, \
FILE_ENV, FILE_CONF, FILE_EXE, USERNAME, GROUPNAME, NAME_SERVICE, DIR_VAR, DIR_LOG, DIR_ETC, FILE_ADMINS, LINK_EXE, \
FILTER, EXEC_SUFFIX, AUTHOR, AUTHOR_EMAIL
RPM_SPEC = """
Name: {namenix}
Version: {version}
Release: {release}
Summary: A safe messaging system.
License: {license}
URL: {url}
Source1: angelos.service
Source2: env.json
Source3: config.json
BuildArch: x86_64
BuildRequires: bzip2-devel, expat-devel, gdbm-devel, ncurses-devel, openssl-devel, readline-devel, sqlite-devel,
BuildRequires: tk-devel, xz-devel, zlib-devel, libffi-devel
BuildRequires: systemd-rpm-macros /usr/bin/pathfix.py
Requires: bzip2-libs, expat, gdbm-libs, ncurses-libs, openssl-libs, readline, sqlite-libs, tk, xz-libs, zlib, libffi
AutoReqProv: no
%description
Ἄγγελος is a safe messenger system. Angelos means "Carrier of a divine message."
%prep
%build
%check
%install
mkdir %{{buildroot}}/opt -p
sudo mv /opt/angelos/ %{{buildroot}}/opt
install --directory %{{buildroot}}{diretc}
install --directory %{{buildroot}}{dirvar}
install --directory %{{buildroot}}{dirlog}
install -D -m 0644 %{{SOURCE1}} %{{buildroot}}%{{_unitdir}}/{nameservice}
install -D -m 0644 %{{SOURCE2}} %{{buildroot}}{fileenv}
install -D -m 0644 %{{SOURCE3}} %{{buildroot}}{fileconf}
pathfix.py -pni "%{{__python3}} %{{py3_shbang_opts}}" %{{buildroot}}/*
%clean
%pre
grep -q {groupname} /etc/group >/dev/null 2>&1 || groupadd {groupname}
id {username} >/dev/null 2>&1 || useradd {username} --system -g {groupname}
%post
%systemd_post {nameservice}
touch {fileadmins}
chown 600 {fileadmins}
chmod {username}:{groupname} {fileadmins}
ln -sf {fileexe} {linkexe}
%preun
%systemd_preun {nameservice}
rm {linkexe}
%postun
%systemd_postun {nameservice}
%changelog
%files
%attr(700, {username}, {groupname}) {dirvar}
%attr(700, {username}, {groupname}) {dirlog}
%{{_unitdir}}/{nameservice}
%config {fileenv}
%config {fileconf}
%defattr({permsfile}, {username}, {groupname}, {permsdir})
{files}
"""
def walk_files(path: str) -> str:
path = str(Path(path))
output = ""
for root, dirs, files in os.walk(path):
output += "{path}\n".format(
perms=PERMS_DIR, path=root)
for file in files:
filepath = os.path.join(root, file)
output += "%attr({perms}, {username}, {groupname}) {path}\n".format(
perms=PERMS_EXEC, path=filepath,
username=USERNAME, groupname=GROUPNAME
) if root.startswith(EXEC_PREFIX) or file.endswith(EXEC_SUFFIX) else "{path}\n".format(
path=filepath
)
return output
def filter_files(path: str, subs: list = None):
    """Delete files and directories under *path* whose full paths match
    any of the given regex fragments (defaults to the module FILTER list).

    Deletions are best-effort: failures are printed, not raised.
    """
    pattern = "|".join(subs if subs else FILTER)
    for root, dirs, files in os.walk(path):
        for name in files:
            filepath = os.path.join(root, name)
            if not (re.search(pattern, filepath) and os.path.exists(filepath)):
                continue
            try:
                os.remove(filepath)
                print("Deleted file:", filepath)
            except Exception as e:
                print(filepath, e)
        if re.search(pattern, root) and os.path.exists(root):
            try:
                shutil.rmtree(root)
                print("Deleted directory:", root)
            except Exception as e:
                print(root, e)
def render_rpm_spec(release: int, full_path: bool=True) -> str:
    """Fill the RPM_SPEC template from the module-level packaging constants.

    Args:
        release: RPM release number inserted into the spec header.
        full_path: currently unused — TODO(review): confirm whether this
            was meant to toggle absolute vs. relative paths like
            render_systemd_unit's service_full_path does.
    """
    return RPM_SPEC.format(
        dirangelos=DIR_ANGELOS, dirvar=DIR_VAR, diretc=DIR_ETC, dirlog=DIR_LOG,
        fileenv=FILE_ENV, fileconf=FILE_CONF, fileexe=FILE_EXE, linkexe=LINK_EXE,
        fileadmins=FILE_ADMINS, permsexec=PERMS_EXEC, permsfile=PERMS_FILE, permsdir=PERMS_DIR,
        username=USERNAME, groupname=GROUPNAME, nameservice=NAME_SERVICE,
        namenix=NAME_NIX, url=URL, version=VERSION, release=release, license=LICENSE,
        # {files} is the generated %files section — see walk_files().
        files=walk_files(DIR_ANGELOS)
    )
# systemd service unit template for the angelos daemon (forking service
# driven through the `{namenix} -d start/stop/restart` CLI).
SYSTEMD_UNIT = """
[Unit]
Description = Run the Angelos server
After = network.target
[Service]
Type = forking
AmbientCapabilities = CAP_NET_BIND_SERVICE
ExecStart = {namenix} -d start
ExecStop = {namenix} -d stop
ExecReload = {namenix} -d restart
PIDFile = /tmp/angelos.pid
User = {username}
Group = {groupname}
StateDirectory = {service_dirvar}
LogsDirectory = {service_dirlog}
ConfigurationDirectory = {service_diretc}
KeyringMode = private
[Install]
WantedBy=default.target
"""
def render_systemd_unit(service_full_path: bool=True) -> str:
    """Render the systemd unit file.

    With service_full_path=True the State/Logs/Configuration directives use
    the absolute DIR_* paths; otherwise they use the bare service name
    (presumably so systemd resolves them under /var/lib, /var/log and /etc
    itself — TODO(review): confirm intended usage).
    """
    return SYSTEMD_UNIT.format(
        namenix=NAME_NIX, username=USERNAME, groupname=GROUPNAME,
        service_dirvar=DIR_VAR if service_full_path else NAME_NIX,
        service_dirlog=DIR_LOG if service_full_path else NAME_NIX,
        service_diretc=DIR_ETC if service_full_path else NAME_NIX
    )
def render_deb_name(release: int) -> str:
    """Return the Debian package directory/file stem, e.g. ``name_1.0-1_amd64``."""
    return "{namenix}_{version}-{release}_amd64".format(
        namenix=NAME_NIX, version=VERSION, release=release)
# debian/control template; runtime Depends are hard-coded for amd64.
DEB_CONTROL = """
Package: {namenix}
Version: {version}
Homepage: {url}
Depends: zlib1g, libncurses5, libgdbm6, libnss3, libssl1.1, libreadline7, libffi6, bzip2, libsqlite3-0
Architecture: amd64
Maintainer: {author} <{authoremail}>
Description: Ἄγγελος is a safe messenger system. Angelos means "Carrier of a divine message."
"""
def render_deb_control() -> str:
    """Fill the debian/control template from the module-level metadata."""
    return DEB_CONTROL.format(
        namenix=NAME_NIX, version=VERSION, url=URL, author=AUTHOR, authoremail=AUTHOR_EMAIL
    )
# debian/copyright template (machine-readable copyright format 1.0).
DEB_COPYRIGHT = """
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: {namenix}
Upstream-Contact: {author} <{authoremail}>
Source: {url}
Files: *
Copyright: 2018-2020, {author} <{authoremail}>
License: MIT
"""
def render_deb_copyright() -> str:
    """Fill the debian/copyright template from the module-level metadata."""
    return DEB_COPYRIGHT.format(
        namenix=NAME_NIX, author=AUTHOR, authoremail=AUTHOR_EMAIL, url=URL,
    )
# debian/changelog template; {timestamp} is filled with an RFC-2822 style
# local-time stamp ("%a, %d %b %Y %X %z").
DEB_CHANGELOG = """
{namenix} ({version}) testing; urgency=medium
* Initial release.
-- {author} <{authoremail}> {timestamp}
"""
def render_deb_changelog() -> str:
    """Render debian/changelog with the current local-time timestamp."""
    return DEB_CHANGELOG.format(
        namenix=NAME_NIX, version=VERSION, author=AUTHOR, authoremail=AUTHOR_EMAIL,
        # now(UTC).astimezone() already yields an aware datetime in the
        # local zone; the original detoured through a second now() call
        # just to extract the local tzinfo. Output is identical.
        timestamp=datetime.datetime.now(
            datetime.timezone.utc).astimezone().strftime("%a, %d %b %Y %X %z"),
    )
# debian/rules template (a makefile). The commented DH_*/DEB_* lines are
# kept as a reference for enabling verbose or hardened builds.
DEB_RULES = """
#!/usr/bin/make -f
#DH_VERBOSE = 1
#export DEB_BUILD_MAINT_OPTIONS = hardening=+all
#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic
#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed
%:
dh $@
"""
def render_deb_rules() -> str:
    """Return debian/rules content (the template has no {placeholders})."""
    return DEB_RULES.format()
# debian/compat: debhelper compatibility level.
DEB_COMPAT = """
10
"""
def render_deb_compat() -> str:
    """Return debian/compat content (no placeholders)."""
    return DEB_COMPAT.format()
# debian/conffiles: intentionally empty (no conffiles registered).
DEB_CONFFILES = """
"""
def render_deb_conffiles() -> str:
    """Return debian/conffiles content (currently empty)."""
    return DEB_CONFFILES.format()
# debian/dirs: directories the package creates on install.
DEB_DIRS = """
etc/angelos
var/lib/angelos
var/log/angelos
"""
def render_deb_dirs() -> str:
    """Return debian/dirs content (no placeholders)."""
    return DEB_DIRS.format()
# debian/links: symlink pairs ("target link") created on install.
DEB_LINKS = """
{fileexe} {linkexe}
"""
def render_deb_links() -> str:
    """Return debian/links mapping the installed executable to its symlink."""
    return DEB_LINKS.format(fileexe=FILE_EXE, linkexe=LINK_EXE)
# Default env.json payload: an empty JSON object ({{ }} escapes the braces).
ENV_JSON = """{{}}"""
def render_env_json() -> str:
    """Return the default env.json content ("{}")."""
    return ENV_JSON.format()
# Default config.json payload: an empty JSON object.
CONFIG_JSON = """{{}}"""
def render_config_json() -> str:
    """Return the default config.json content ("{}")."""
    return CONFIG_JSON.format()
# admins.pub starts out empty; populated at runtime (see %post touch).
ADMINS_PUB = """"""
def render_admins_pub() -> str:
    """Return the (empty) initial admins.pub content."""
    return ADMINS_PUB.format()
| true | true |
f726cc8a8a3084e57b5bf0e9e1bfcc87faa21241 | 6,494 | py | Python | dvc/stage/serialize.py | asford/dvc | 4ed55d00511ea3d9115b76c463e1a466408b11ef | [
"Apache-2.0"
] | 1 | 2021-06-18T19:36:13.000Z | 2021-06-18T19:36:13.000Z | dvc/stage/serialize.py | asford/dvc | 4ed55d00511ea3d9115b76c463e1a466408b11ef | [
"Apache-2.0"
] | 82 | 2021-05-04T02:40:05.000Z | 2022-03-31T03:14:04.000Z | dvc/stage/serialize.py | asford/dvc | 4ed55d00511ea3d9115b76c463e1a466408b11ef | [
"Apache-2.0"
] | 2 | 2021-06-14T19:12:25.000Z | 2021-06-14T19:12:29.000Z | from collections import OrderedDict
from functools import partial
from operator import attrgetter
from typing import TYPE_CHECKING, List, no_type_check
from funcy import post_processing
from dvc.dependency import ParamsDependency
from dvc.output import BaseOutput
from dvc.utils.collections import apply_diff
from dvc.utils.serialize import parse_yaml_for_update
from .params import StageParams
from .utils import resolve_wdir, split_params_deps
if TYPE_CHECKING:
from dvc.stage import PipelineStage, Stage
# Short aliases for the schema keys used when (de)serializing stages.
PARAM_PARAMS = ParamsDependency.PARAM_PARAMS
PARAM_PATH = ParamsDependency.PARAM_PATH
PARAM_DEPS = StageParams.PARAM_DEPS
PARAM_OUTS = StageParams.PARAM_OUTS
PARAM_CACHE = BaseOutput.PARAM_CACHE
PARAM_METRIC = BaseOutput.PARAM_METRIC
PARAM_PLOT = BaseOutput.PARAM_PLOT
PARAM_PERSIST = BaseOutput.PARAM_PERSIST
PARAM_CHECKPOINT = BaseOutput.PARAM_CHECKPOINT
PARAM_DESC = BaseOutput.PARAM_DESC
DEFAULT_PARAMS_FILE = ParamsDependency.DEFAULT_PARAMS_FILE
# Deterministically orders any collection of deps/outs by their def_path.
sort_by_path = partial(sorted, key=attrgetter("def_path"))
@post_processing(OrderedDict)
def _get_flags(out):
    """Yield (key, value) pairs for an output's non-default flags.

    funcy.post_processing collects the yielded pairs into an OrderedDict,
    so the yield order here IS the dump order in the pipeline file.
    Only non-default flags are emitted, keeping the dump minimal.
    """
    if out.desc:
        yield PARAM_DESC, out.desc
    if not out.use_cache:
        yield PARAM_CACHE, False
    if out.checkpoint:
        yield PARAM_CHECKPOINT, True
    if out.persist:
        yield PARAM_PERSIST, True
    if out.plot and isinstance(out.plot, dict):
        # notice `out.plot` is not sorted
        # `out.plot` is in the same order as is in the file when read
        # and, should be dumped as-is without any sorting
        yield from out.plot.items()
    if out.live and isinstance(out.live, dict):
        # same as plot: live flags keep their on-disk order
        yield from out.live.items()
def _serialize_out(out):
    """Dump an output as its bare path, or as ``{path: flags}`` when it
    carries any non-default flags."""
    flags = _get_flags(out)
    if not flags:
        return out.def_path
    return {out.def_path: flags}
@no_type_check
def _serialize_outs(outputs: List[BaseOutput]):
    """Split outputs into plain outs, metrics, plots, plus the (single
    allowed) live output, each serialized and sorted by path."""
    outs = []
    metrics = []
    plots = []
    live = None
    for out in sort_by_path(outputs):
        # Classification priority: plot, then metric, then live.
        if out.plot:
            plots.append(_serialize_out(out))
        elif out.metric:
            metrics.append(_serialize_out(out))
        elif out.live:
            # Only one live output is supported per stage.
            assert live is None
            live = _serialize_out(out)
        else:
            outs.append(_serialize_out(out))
    return outs, metrics, plots, live
def _serialize_params_keys(params):
    """
    Returns the following format of data:
         ['lr', 'train', {'params2.yaml': ['lr']}]

    Keys from the default params file come first (as bare strings); entries
    for other files follow in lexicographic path order, each as a
    ``{path: [keys]}`` mapping with its keys sorted as well.
    """
    default_keys = []
    other_files = []
    for param_dep in sort_by_path(params):
        dump = param_dep.dumpd()
        path, file_params = dump[PARAM_PATH], dump[PARAM_PARAMS]
        assert isinstance(file_params, (dict, list))
        # On no_exec the params are not filled in and arrive as a bare list.
        keys = sorted(
            file_params.keys() if isinstance(file_params, dict) else file_params
        )
        if not keys:
            continue
        if path == DEFAULT_PARAMS_FILE:
            default_keys = keys + default_keys
        else:
            other_files.append({path: keys})
    return default_keys + other_files
@no_type_check
def _serialize_params_values(params: List[ParamsDependency]):
    """Returns output of following format, used for lockfile:
        {'params.yaml': {'lr': '1', 'train': 2}, {'params2.yaml': {'lr': '1'}}

    The default params file is moved to the front; other files keep their
    lexicographic path order. Values inside each file are key-sorted
    (shallowly, not recursively)."""
    key_vals = OrderedDict()
    for param_dep in sort_by_path(params):
        dump = param_dep.dumpd()
        path, file_params = dump[PARAM_PATH], dump[PARAM_PARAMS]
        if not isinstance(file_params, dict):
            # Unfilled (no_exec) params are a bare list — nothing to dump.
            continue
        key_vals[path] = OrderedDict(
            (key, file_params[key]) for key in sorted(file_params)
        )
        if path == DEFAULT_PARAMS_FILE:
            key_vals.move_to_end(path, last=False)
    return key_vals
def to_pipeline_file(stage: "PipelineStage"):
    """Dump a pipeline stage as ``{stage_name: {field: value, ...}}`` for
    dvc.yaml, preserving the canonical field order and dropping any field
    whose value is falsy (empty list/dict, None, False)."""
    wdir = resolve_wdir(stage.wdir, stage.path)
    params, deps = split_params_deps(stage)
    deps = sorted(d.def_path for d in deps)
    params = _serialize_params_keys(params)
    outs, metrics, plots, live = _serialize_outs(stage.outs)
    cmd = stage.cmd
    assert cmd, (
        f"'{stage.PARAM_CMD}' cannot be empty for stage '{stage.name}', "
        f"got: '{cmd}'(type: '{type(cmd).__name__}')"
    )
    # Order here defines the field order in the dumped file.
    res = [
        (stage.PARAM_DESC, stage.desc),
        (stage.PARAM_CMD, stage.cmd),
        (stage.PARAM_WDIR, wdir),
        (stage.PARAM_DEPS, deps),
        (stage.PARAM_PARAMS, params),
        (stage.PARAM_OUTS, outs),
        (stage.PARAM_METRICS, metrics),
        (stage.PARAM_PLOTS, plots),
        (stage.PARAM_LIVE, live),
        (stage.PARAM_FROZEN, stage.frozen),
        (stage.PARAM_ALWAYS_CHANGED, stage.always_changed),
        (stage.PARAM_META, stage.meta),
    ]
    # Falsy values (including frozen=False etc.) are omitted from the dump.
    return {
        stage.name: OrderedDict([(key, value) for key, value in res if value])
    }
def to_single_stage_lockfile(stage: "Stage") -> dict:
    """Dump one stage's resolved state (cmd, deps, params, outs) in the
    lockfile format; empty sections are omitted."""
    assert stage.cmd

    def _dumpd(item):
        # Path first, then the hash fields, then the optional exec flag.
        entry = OrderedDict()
        entry[item.PARAM_PATH] = item.def_path
        for key, value in item.hash_info.to_dict().items():
            entry[key] = value
        if item.isexec:
            entry[item.PARAM_ISEXEC] = True
        return entry

    params, deps = split_params_deps(stage)
    deps = [_dumpd(item) for item in sort_by_path(deps)]
    outs = [_dumpd(item) for item in sort_by_path(stage.outs)]
    params = _serialize_params_values(params)

    res = OrderedDict([("cmd", stage.cmd)])
    for key, value in (
        (PARAM_DEPS, deps),
        (PARAM_PARAMS, params),
        (PARAM_OUTS, outs),
    ):
        if value:
            res[key] = value
    return res
def to_lockfile(stage: "PipelineStage") -> dict:
    """Wrap a single stage's lockfile dump under its stage name."""
    assert stage.name
    entry = to_single_stage_lockfile(stage)
    return {stage.name: entry}
def to_single_stage_file(stage: "Stage"):
    """Dump a stage for its single-stage file, preserving the original
    YAML comments/formatting when the source text is available."""
    state = stage.dumpd()

    # Loading used a fast YAML parser that drops comments and formatting.
    # To retain them on update: reparse the original text with the slow
    # but smart ruamel parser, apply our changes onto that structure,
    # and serialize that instead.
    text = stage._stage_text  # noqa, pylint: disable=protected-access
    if text is None:
        return state
    saved_state = parse_yaml_for_update(text, stage.path)
    apply_diff(state, saved_state)
    return saved_state
| 31.221154 | 79 | 0.665075 | from collections import OrderedDict
from functools import partial
from operator import attrgetter
from typing import TYPE_CHECKING, List, no_type_check
from funcy import post_processing
from dvc.dependency import ParamsDependency
from dvc.output import BaseOutput
from dvc.utils.collections import apply_diff
from dvc.utils.serialize import parse_yaml_for_update
from .params import StageParams
from .utils import resolve_wdir, split_params_deps
if TYPE_CHECKING:
from dvc.stage import PipelineStage, Stage
PARAM_PARAMS = ParamsDependency.PARAM_PARAMS
PARAM_PATH = ParamsDependency.PARAM_PATH
PARAM_DEPS = StageParams.PARAM_DEPS
PARAM_OUTS = StageParams.PARAM_OUTS
PARAM_CACHE = BaseOutput.PARAM_CACHE
PARAM_METRIC = BaseOutput.PARAM_METRIC
PARAM_PLOT = BaseOutput.PARAM_PLOT
PARAM_PERSIST = BaseOutput.PARAM_PERSIST
PARAM_CHECKPOINT = BaseOutput.PARAM_CHECKPOINT
PARAM_DESC = BaseOutput.PARAM_DESC
DEFAULT_PARAMS_FILE = ParamsDependency.DEFAULT_PARAMS_FILE
sort_by_path = partial(sorted, key=attrgetter("def_path"))
@post_processing(OrderedDict)
def _get_flags(out):
if out.desc:
yield PARAM_DESC, out.desc
if not out.use_cache:
yield PARAM_CACHE, False
if out.checkpoint:
yield PARAM_CHECKPOINT, True
if out.persist:
yield PARAM_PERSIST, True
if out.plot and isinstance(out.plot, dict):
yield from out.plot.items()
if out.live and isinstance(out.live, dict):
yield from out.live.items()
def _serialize_out(out):
flags = _get_flags(out)
return out.def_path if not flags else {out.def_path: flags}
@no_type_check
def _serialize_outs(outputs: List[BaseOutput]):
outs, metrics, plots, live = [], [], [], None
for out in sort_by_path(outputs):
bucket = outs
if out.plot:
bucket = plots
elif out.metric:
bucket = metrics
elif out.live:
assert live is None
live = _serialize_out(out)
continue
bucket.append(_serialize_out(out))
return outs, metrics, plots, live
def _serialize_params_keys(params):
keys = []
for param_dep in sort_by_path(params):
dump = param_dep.dumpd()
path, params = dump[PARAM_PATH], dump[PARAM_PARAMS]
assert isinstance(params, (dict, list))
k = sorted(params.keys() if isinstance(params, dict) else params)
if not k:
continue
if path == DEFAULT_PARAMS_FILE:
keys = k + keys
else:
keys.append({path: k})
return keys
@no_type_check
def _serialize_params_values(params: List[ParamsDependency]):
key_vals = OrderedDict()
for param_dep in sort_by_path(params):
dump = param_dep.dumpd()
path, params = dump[PARAM_PATH], dump[PARAM_PARAMS]
if isinstance(params, dict):
kv = [(key, params[key]) for key in sorted(params.keys())]
key_vals[path] = OrderedDict(kv)
if path == DEFAULT_PARAMS_FILE:
key_vals.move_to_end(path, last=False)
return key_vals
def to_pipeline_file(stage: "PipelineStage"):
wdir = resolve_wdir(stage.wdir, stage.path)
params, deps = split_params_deps(stage)
deps = sorted(d.def_path for d in deps)
params = _serialize_params_keys(params)
outs, metrics, plots, live = _serialize_outs(stage.outs)
cmd = stage.cmd
assert cmd, (
f"'{stage.PARAM_CMD}' cannot be empty for stage '{stage.name}', "
f"got: '{cmd}'(type: '{type(cmd).__name__}')"
)
res = [
(stage.PARAM_DESC, stage.desc),
(stage.PARAM_CMD, stage.cmd),
(stage.PARAM_WDIR, wdir),
(stage.PARAM_DEPS, deps),
(stage.PARAM_PARAMS, params),
(stage.PARAM_OUTS, outs),
(stage.PARAM_METRICS, metrics),
(stage.PARAM_PLOTS, plots),
(stage.PARAM_LIVE, live),
(stage.PARAM_FROZEN, stage.frozen),
(stage.PARAM_ALWAYS_CHANGED, stage.always_changed),
(stage.PARAM_META, stage.meta),
]
return {
stage.name: OrderedDict([(key, value) for key, value in res if value])
}
def to_single_stage_lockfile(stage: "Stage") -> dict:
assert stage.cmd
def _dumpd(item):
ret = [
(item.PARAM_PATH, item.def_path),
*item.hash_info.to_dict().items(),
]
if item.isexec:
ret.append((item.PARAM_ISEXEC, True))
return OrderedDict(ret)
res = OrderedDict([("cmd", stage.cmd)])
params, deps = split_params_deps(stage)
deps, outs = [
[_dumpd(item) for item in sort_by_path(items)]
for items in [deps, stage.outs]
]
params = _serialize_params_values(params)
if deps:
res[PARAM_DEPS] = deps
if params:
res[PARAM_PARAMS] = params
if outs:
res[PARAM_OUTS] = outs
return res
def to_lockfile(stage: "PipelineStage") -> dict:
assert stage.name
return {stage.name: to_single_stage_lockfile(stage)}
def to_single_stage_file(stage: "Stage"):
state = stage.dumpd()
text = stage._stage_text
if text is not None:
saved_state = parse_yaml_for_update(text, stage.path)
apply_diff(state, saved_state)
state = saved_state
return state
| true | true |
f726cca87b9b1703027d92c330e14876f773484c | 8,401 | py | Python | mmdet/models/losses/my_cross_entropy_loss.py | dyabel/wsod-mmdet | 60fc1993ea298f992b160b5599a6134702ac0d4f | [
"Apache-2.0"
] | 6 | 2021-10-09T05:34:04.000Z | 2022-03-31T00:36:55.000Z | mmdet/models/losses/my_cross_entropy_loss.py | dyabel/wsod-mmdet | 60fc1993ea298f992b160b5599a6134702ac0d4f | [
"Apache-2.0"
] | null | null | null | mmdet/models/losses/my_cross_entropy_loss.py | dyabel/wsod-mmdet | 60fc1993ea298f992b160b5599a6134702ac0d4f | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..builder import LOSSES
from .utils import weight_reduce_loss
eps = 0.000001
def cross_entropy_without_softmax(pred,
                                  label,
                                  weight=None,
                                  reduction='mean',
                                  avg_factor=None,
                                  class_weight=None):
    """Calculate the CrossEntropy loss on already-softmaxed predictions.

    Unlike :func:`cross_entropy`, ``pred`` is expected to already contain
    probabilities, so NLL is computed on ``log(pred)`` directly.

    Args:
        pred (torch.Tensor): Predicted probabilities with shape (N, C), C
            is the number of classes. Values must be strictly positive
            (``log`` is taken without clamping).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    # element-wise losses; class_weight is forwarded so per-class weights
    # actually take effect (previously it was accepted but ignored).
    loss = F.nll_loss(torch.log(pred), label, weight=class_weight,
                      reduction='none')

    if weight is not None:
        weight = weight.float()
    # Reduce unconditionally: previously this call was nested under the
    # `weight is not None` check, so the default reduction='mean' was
    # silently skipped and an element-wise tensor was returned.
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
    return loss
def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  class_weight=None):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    # element-wise losses
    loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')

    if weight is not None:
        weight = weight.float()
    # Reduce unconditionally: previously this call was nested under the
    # `weight is not None` check, so the default reduction='mean' was
    # silently skipped when no sample weights were given.
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
    return loss
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(
(labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None):
    """Calculate the binary CrossEntropy loss.

    NOTE(review): this WSOD variant deviates from stock mmdet —
    ``reduction``, ``avg_factor`` and ``class_weight`` are accepted for
    interface compatibility but are never applied; the result is always
    the unweighted mean from ``F.binary_cross_entropy``. ``weight`` is
    converted to float but likewise not forwarded to the loss. Confirm
    this is intentional before reusing the function elsewhere.

    Args:
        pred (torch.Tensor): The prediction (probabilities, not logits —
            no sigmoid is applied here).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight
            (currently unused, see note above).
        reduction (str, optional): unused, see note above.
        avg_factor (int, optional): unused, see note above.
        class_weight (list[float], optional): unused, see note above.

    Returns:
        torch.Tensor: The calculated loss
    """
    # Class-index labels are expanded to one-hot to match pred's shape.
    if pred.dim() != label.dim():
        label, weight = _expand_onehot_labels(label, weight, pred.size(-1))

    # weighted element-wise losses
    if weight is not None:
        weight = weight.float()
    # Clamping keeps log() finite; labels are forced into [0, 1].
    pred = pred.clamp(1e-6,1-1e-6)
    label = label.clamp(0,1)
    loss = F.binary_cross_entropy(pred,label)
    return loss
def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None):
    """Calculate the CrossEntropy loss for masks.

    For each ROI, the mask prediction of the class given by ``label`` is
    selected (class-aware mask heads) and compared against ``target``
    with binary cross-entropy on logits.

    Args:
        pred (torch.Tensor): Predictions of shape (N, C, ...).
        target (torch.Tensor): The learning target of the selected masks.
        label (torch.Tensor): Class index per ROI, used to pick the mask
            channel when the head is not class-agnostic.
        reduction (str, optional): Only 'mean' is supported (reserved).
        avg_factor (int, optional): Must be None (reserved).
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss, shape (1,).
    """
    # TODO: handle these two reserved arguments
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size(0)
    row_inds = torch.arange(num_rois, dtype=torch.long, device=pred.device)
    class_logits = pred[row_inds, label].squeeze(1)
    loss = F.binary_cross_entropy_with_logits(
        class_logits, target, weight=class_weight, reduction='mean')
    return loss[None]
@LOSSES.register_module()
class MyCrossEntropyLoss(nn.Module):
    # NOTE(review): despite the use_sigmoid/use_mask options below, the
    # criterion is hardwired to binary_cross_entropy in __init__ — the
    # flags are stored but never consulted. Confirm this is intentional.

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0):
        """CrossEntropyLoss.

        Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                of softmax. Defaults to False. (Currently stored only.)
            use_mask (bool, optional): Whether to use mask cross entropy loss.
                Defaults to False. (Currently stored only.)
            reduction (str, optional): . Defaults to 'mean'.
                Options are "none", "mean" and "sum".
            class_weight (list[float], optional): Weight of each class.
                Defaults to None.
            loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
        """
        super(MyCrossEntropyLoss, self).__init__()
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        # Always binary cross-entropy in this WSOD variant (see class note).
        self.cls_criterion = binary_cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function.

        Args:
            cls_score (torch.Tensor): The prediction.
            label (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): Overrides self.reduction for
                this call. Options are "none", "mean" and "sum".
        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        # Materialize the per-class weights on the prediction's device.
        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(
                self.class_weight, device=cls_score.device)
        else:
            class_weight = None
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_cls
| 36.055794 | 79 | 0.59612 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..builder import LOSSES
from .utils import weight_reduce_loss
eps = 0.000001
def cross_entropy_without_softmax(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
loss = F.nll_loss(torch.log(pred), label, reduction = 'none')
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(
(labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
if pred.dim() != label.dim():
label, weight = _expand_onehot_labels(label, weight, pred.size(-1))
if weight is not None:
weight = weight.float()
pred = pred.clamp(1e-6,1-1e-6)
label = label.clamp(0,1)
loss = F.binary_cross_entropy(pred,label)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None):
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class MyCrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
loss_weight=1.0):
super(MyCrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
self.cls_criterion = binary_cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(
self.class_weight, device=cls_score.device)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
| true | true |
f726ce223abc85e16691c9ec990fbe29a1aa1ef0 | 400 | py | Python | sys_monitor/wsgi.py | PeterXUYAOHAI/System_Monitor | 2b78107a7f87e13ebab38ea5a89c870ef5415fd2 | [
"MIT"
] | 2 | 2018-05-07T03:30:55.000Z | 2018-05-10T11:27:18.000Z | sys_monitor/wsgi.py | PeterXUYAOHAI/System_Monitor | 2b78107a7f87e13ebab38ea5a89c870ef5415fd2 | [
"MIT"
] | null | null | null | sys_monitor/wsgi.py | PeterXUYAOHAI/System_Monitor | 2b78107a7f87e13ebab38ea5a89c870ef5415fd2 | [
"MIT"
] | null | null | null | """
WSGI config for sys_monitor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# setdefault: only applies if DJANGO_SETTINGS_MODULE is not already set,
# so deployment environments can point at a different settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sys_monitor.settings")
# Module-level WSGI callable that servers (gunicorn/uwsgi/mod_wsgi) look up.
application = get_wsgi_application()
| 23.529412 | 78 | 0.79 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sys_monitor.settings")
application = get_wsgi_application()
| true | true |
f726cee2a25f6da66c15f0d51afb12dd8579d0fe | 6,439 | py | Python | python/oneflow/nn/optimizer/sgd.py | grybd/oneflow | 82237ad096a10527591660c09b61444c42917e69 | [
"Apache-2.0"
] | 3,285 | 2020-07-31T05:51:22.000Z | 2022-03-31T15:20:16.000Z | python/oneflow/nn/optimizer/sgd.py | grybd/oneflow | 82237ad096a10527591660c09b61444c42917e69 | [
"Apache-2.0"
] | 2,417 | 2020-07-31T06:28:58.000Z | 2022-03-31T23:04:14.000Z | python/oneflow/nn/optimizer/sgd.py | grybd/oneflow | 82237ad096a10527591660c09b61444c42917e69 | [
"Apache-2.0"
] | 520 | 2020-07-31T05:52:42.000Z | 2022-03-29T02:38:11.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from typing import Callable, Dict, Iterator, List, Union
import oneflow as flow
from oneflow.nn.parameter import Parameter
from .optimizer import Optimizer, ParamGroup
class SGD(Optimizer):
    """Implements SGD algorithm.

    This algorithm takes a random sample's gradient as an approximate estimate of
    the overall gradient in small batch gradient descent.

    When the momentum = 0, the equation of parameters updating is:

        .. math::

            param_{new} = param_{old} - learning\\_rate * grad

    With momentum, the equation of parameters updating is:

        .. math::

            & V_t = \\beta * V_{t-1} - learning\\_rate * (g_t + param_{old} * weight\\_decay)

            & param_{new} = param_{old} + V_t

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        momentum (float, optional): Momentum factor (default: 0.0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)

    For example:

    Example 1:

    .. code-block:: python

        # Assume net is a custom model.
        sgd = flow.optim.SGD(net.parameters(), lr=1e-3)

        for epoch in range(epochs):
            # Read data, Compute the loss and so on.
            # ...
            loss.backward()
            sgd.step()
            sgd.zero_grad()

    Example 2:

    .. code-block:: python

        # Assume net is a custom model.
        sgd = flow.optim.SGD(
            [
                {
                    "params": net.parameters(),
                    "lr": learning_rate,
                    "clip_grad_max_norm": 0.5,
                    "clip_grad_norm_type": 2.0,
                }
            ],
        )

        for epoch in range(epochs):
            # Read data, Compute the loss and so on.
            # ...
            loss.backward()
            sgd.clip_grad()
            sgd.step()
            sgd.zero_grad()

    If you want to use clip_grad, you can refer this example.

    For more details of `clip_grad_max_norm` and `clip_grad_norm_type`, you can refer to :func:`oneflow.nn.utils.clip_grad_norm_`.

    """

    def __init__(
        self,
        parameters: Union[Iterator[Parameter], List[Dict]],
        lr: float = 0.001,
        momentum: float = 0.0,
        weight_decay: float = 0.0,
    ):
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert momentum >= 0.0, f"Invalid momentum: {momentum}"
        assert weight_decay >= 0.0, f"Invalid weight_decay: {weight_decay}"
        options = dict()
        options["lr"] = lr
        options["momentum"] = momentum
        options["weight_decay"] = weight_decay
        super().__init__(parameters, options)
        # Per-parameter optimizer state (e.g. the momentum buffer) lives
        # in self._state, keyed by the parameter tensor itself.
        for param_group in self.param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                self._state[param] = dict()
        # Pre-built update ops. l1/weight_decay here are build-time
        # defaults; the effective values are passed per call in step().
        self._momentum_sgd = (
            flow.builtin_op("momentum_update")
            .Input("model")
            .Input("model_diff")
            .Input("momentum")
            .Attr("l1", 0.0)
            .Attr("weight_decay", 0.0)
            .Build()
        )
        self._sgd = (
            flow.builtin_op("sgd_update")
            .Input("model")
            .Input("model_diff")
            .Attr("weight_decay", 0.0)
            .Attr("l1", 0.0)
            .Build()
        )

    def step(self, closure: Callable = None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the
                model and returns the loss.
        """
        with flow.no_grad():
            loss = None
            if closure is not None:
                loss = closure()
            for param_group in self.param_groups:
                lr = param_group["lr"]
                # weight_decay acts as the L2 coefficient of the update op.
                l2 = param_group["weight_decay"]
                for param in param_group.parameters:
                    if param.grad is None:
                        continue
                    if param_group["momentum"] == 0.0:
                        # Plain SGD update.
                        self._sgd(param, param.grad, learning_rate_val=lr, l2=l2)
                    else:
                        # Momentum SGD: the buffer is created lazily on
                        # the first update of each parameter.
                        if "momentum_buf" not in self._state[param]:
                            self._state[param]["momentum_buf"] = flow.zeros_like(param)
                        momentum_buf = self._state[param]["momentum_buf"]
                        beta = param_group["momentum"]
                        self._momentum_sgd(
                            param,
                            param.grad,
                            momentum_buf,
                            learning_rate_val=lr,
                            l2=l2,
                            beta=beta,
                        )
            self._state["step"] = self._state["step"] + 1
            return loss

    def _generate_conf_for_graph(self, train_conf, vars_conf):
        # Translate each param group into an optimizer_conf entry for
        # graph (lazy) mode: naive conf when momentum is 0, momentum conf
        # otherwise; grad-clip settings and per-variable L2 are attached.
        new_opt_confs = []
        for param_group in self.param_groups:
            optimizer_conf = train_conf.mutable_optimizer_conf().Add()
            # Prefer the scheduler's initial_lr when present.
            lr = (
                param_group["initial_lr"]
                if "initial_lr" in param_group
                else param_group["lr"]
            )
            beta = param_group["momentum"]
            l2 = param_group["weight_decay"]
            optimizer_conf.set_base_learning_rate(lr)
            if beta == 0:
                optimizer_conf.mutable_naive_conf()
            else:
                optimizer_conf.mutable_momentum_conf().set_beta(beta)
            self._generate_grad_clip_conf_for_optim_conf(param_group, optimizer_conf)
            for param in param_group.parameters:
                vars_conf[param].l2 = l2
                if param.requires_grad:
                    optimizer_conf.add_variable_op_names(vars_conf[param].name)
            new_opt_confs.append(optimizer_conf)
        return new_opt_confs
| 33.362694 | 131 | 0.55226 | import collections
from typing import Callable, Dict, Iterator, List, Union
import oneflow as flow
from oneflow.nn.parameter import Parameter
from .optimizer import Optimizer, ParamGroup
class SGD(Optimizer):
def __init__(
self,
parameters: Union[Iterator[Parameter], List[Dict]],
lr: float = 0.001,
momentum: float = 0.0,
weight_decay: float = 0.0,
):
assert lr >= 0.0, f"Invalid learning rate: {lr}"
assert momentum >= 0.0, f"Invalid momentum: {momentum}"
assert weight_decay >= 0.0, f"Invalid weight_decay: {weight_decay}"
options = dict()
options["lr"] = lr
options["momentum"] = momentum
options["weight_decay"] = weight_decay
super().__init__(parameters, options)
for param_group in self.param_groups:
for param in param_group.parameters:
assert param.is_leaf, "parameters must be leaf tensor"
self._state[param] = dict()
self._momentum_sgd = (
flow.builtin_op("momentum_update")
.Input("model")
.Input("model_diff")
.Input("momentum")
.Attr("l1", 0.0)
.Attr("weight_decay", 0.0)
.Build()
)
self._sgd = (
flow.builtin_op("sgd_update")
.Input("model")
.Input("model_diff")
.Attr("weight_decay", 0.0)
.Attr("l1", 0.0)
.Build()
)
def step(self, closure: Callable = None):
with flow.no_grad():
loss = None
if closure is not None:
loss = closure()
for param_group in self.param_groups:
lr = param_group["lr"]
l2 = param_group["weight_decay"]
for param in param_group.parameters:
if param.grad is None:
continue
if param_group["momentum"] == 0.0:
self._sgd(param, param.grad, learning_rate_val=lr, l2=l2)
else:
if "momentum_buf" not in self._state[param]:
self._state[param]["momentum_buf"] = flow.zeros_like(param)
momentum_buf = self._state[param]["momentum_buf"]
beta = param_group["momentum"]
self._momentum_sgd(
param,
param.grad,
momentum_buf,
learning_rate_val=lr,
l2=l2,
beta=beta,
)
self._state["step"] = self._state["step"] + 1
return loss
def _generate_conf_for_graph(self, train_conf, vars_conf):
new_opt_confs = []
for param_group in self.param_groups:
optimizer_conf = train_conf.mutable_optimizer_conf().Add()
lr = (
param_group["initial_lr"]
if "initial_lr" in param_group
else param_group["lr"]
)
beta = param_group["momentum"]
l2 = param_group["weight_decay"]
optimizer_conf.set_base_learning_rate(lr)
if beta == 0:
optimizer_conf.mutable_naive_conf()
else:
optimizer_conf.mutable_momentum_conf().set_beta(beta)
self._generate_grad_clip_conf_for_optim_conf(param_group, optimizer_conf)
for param in param_group.parameters:
vars_conf[param].l2 = l2
if param.requires_grad:
optimizer_conf.add_variable_op_names(vars_conf[param].name)
new_opt_confs.append(optimizer_conf)
return new_opt_confs
| true | true |
f726cff10848dfee859add52644fda3f040aa102 | 966 | py | Python | nms/benchmark/nms_numba_cpu.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 12 | 2020-03-25T01:24:22.000Z | 2021-09-18T06:40:16.000Z | nms/benchmark/nms_numba_cpu.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 1 | 2020-04-22T07:52:36.000Z | 2020-04-22T07:52:36.000Z | nms/benchmark/nms_numba_cpu.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 4 | 2020-03-25T01:24:26.000Z | 2020-09-20T11:29:09.000Z | from __future__ import absolute_import
import numba
import numpy as np
@numba.jit(nopython=True)
def nms_cpu(dets, thresh):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
if __name__ == "__main__":
bbox=np.load("bbox.npy")
print(bbox.shape)
keep=nms_cpu(bbox,0.7)
print(len(keep))
| 25.421053 | 59 | 0.519669 | from __future__ import absolute_import
import numba
import numpy as np
@numba.jit(nopython=True)
def nms_cpu(dets, thresh):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
if __name__ == "__main__":
bbox=np.load("bbox.npy")
print(bbox.shape)
keep=nms_cpu(bbox,0.7)
print(len(keep))
| true | true |
f726d15285b7c2f6ec452d3585c75aaddfd2bc1d | 594 | py | Python | examples/vagrant_todo/provision/recipes/project.py | avladev/pypro | 7eb98c5ebd9830104689d105c36424b24c72b475 | [
"MIT"
] | null | null | null | examples/vagrant_todo/provision/recipes/project.py | avladev/pypro | 7eb98c5ebd9830104689d105c36424b24c72b475 | [
"MIT"
] | null | null | null | examples/vagrant_todo/provision/recipes/project.py | avladev/pypro | 7eb98c5ebd9830104689d105c36424b24c72b475 | [
"MIT"
] | 1 | 2019-07-15T21:35:03.000Z | 2019-07-15T21:35:03.000Z | import pypro.core
import os
class CreateConfig(pypro.core.Recipe):
def __init__(self, source, destination):
self.source = source
self.destination = destination
def run(self, runner, arguments=None):
# Read the template file
content = ''
with open(self.source, 'r') as f:
content = f.read(os.path.getsize(self.source))
# Replace notations with actual values
content = pypro.core.Variables.replace(content)
# Write the config file
with open(self.destination, 'w') as f:
f.write(content) | 27 | 58 | 0.622896 | import pypro.core
import os
class CreateConfig(pypro.core.Recipe):
def __init__(self, source, destination):
self.source = source
self.destination = destination
def run(self, runner, arguments=None):
content = ''
with open(self.source, 'r') as f:
content = f.read(os.path.getsize(self.source))
content = pypro.core.Variables.replace(content)
with open(self.destination, 'w') as f:
f.write(content) | true | true |
f726d1ac18979248f061387ecccea5858da651fb | 974 | py | Python | for python/data/ggiramahor/pframe.py | aerolalit/Auto-Testing-Python-Programs | dd49ab266c9f0fd8e34278f68f8af017711942e3 | [
"MIT"
] | 4 | 2019-10-03T21:16:51.000Z | 2019-10-04T01:28:08.000Z | for python/data/ggiramahor/pframe.py | aerolalit/Auto-Testing | dd49ab266c9f0fd8e34278f68f8af017711942e3 | [
"MIT"
] | null | null | null | for python/data/ggiramahor/pframe.py | aerolalit/Auto-Testing | dd49ab266c9f0fd8e34278f68f8af017711942e3 | [
"MIT"
] | null | null | null | #350111
#a3-p10.py
#Gloria Giramahoro
#g.giramahoro@jacobs-university.de
#1.defining a function that prints a rectangle made of a character
def print_frame(n,m,c):
count = 1
if (n >= m):
product1 = n*c
print (product1)
for count in range(1,m-1):
words1 = str(' ')
words = (n-4)*words1
print (c,words,c)
count = count+1
print (product1)
else :
product2 = m*c
print (product2)
for count in range(1,n-1):
words2 = str(' ')
words = (m-4)*words2
print (c,words,c)
count = count+1
print (product2)
#2.inputing 2 integers n and m and a character c
print("enter an integer value of n")
integer1 = input()
n = 4
print("enter an integer value of m")
integer2 = input()
m = 7
print("enter a character value of c")
character = input()
c = '$'
print_frame(n,m,c)
| 22.651163 | 67 | 0.532854 |
def print_frame(n,m,c):
count = 1
if (n >= m):
product1 = n*c
print (product1)
for count in range(1,m-1):
words1 = str(' ')
words = (n-4)*words1
print (c,words,c)
count = count+1
print (product1)
else :
product2 = m*c
print (product2)
for count in range(1,n-1):
words2 = str(' ')
words = (m-4)*words2
print (c,words,c)
count = count+1
print (product2)
print("enter an integer value of n")
integer1 = input()
n = 4
print("enter an integer value of m")
integer2 = input()
m = 7
print("enter a character value of c")
character = input()
c = '$'
print_frame(n,m,c)
| true | true |
f726d1cf94494190573638e38a30c1c86a608bae | 595 | py | Python | next-permutation/next-permutation.py | gashev/algorithms | ea750b84658e282afad9db3cd51081e30521074b | [
"Unlicense"
] | 1 | 2020-07-23T21:33:43.000Z | 2020-07-23T21:33:43.000Z | next-permutation/next-permutation.py | gashev/algorithms | ea750b84658e282afad9db3cd51081e30521074b | [
"Unlicense"
] | null | null | null | next-permutation/next-permutation.py | gashev/algorithms | ea750b84658e282afad9db3cd51081e30521074b | [
"Unlicense"
] | null | null | null | def nextPermutation(numbers):
size = len(numbers)
tmp = len(numbers) - 1
while (tmp >= 0) and (numbers[tmp - 1] > numbers[tmp]):
tmp -= 1
if (not tmp):
return False
i = tmp - 1
tmp = size - 1
while (tmp > i) and (numbers[tmp] < numbers[i]):
tmp -= 1
j = tmp
numbers[i], numbers[j] = numbers[j], numbers[i]
i = i + 1
j = size - 1
while(i < j):
numbers[i], numbers[j] = numbers[j], numbers[i]
i += 1
j -= 1
return True
a = [1, 2, 3, 4, 5, 6]
print a
while nextPermutation(a):
print a
| 18.030303 | 59 | 0.494118 | def nextPermutation(numbers):
size = len(numbers)
tmp = len(numbers) - 1
while (tmp >= 0) and (numbers[tmp - 1] > numbers[tmp]):
tmp -= 1
if (not tmp):
return False
i = tmp - 1
tmp = size - 1
while (tmp > i) and (numbers[tmp] < numbers[i]):
tmp -= 1
j = tmp
numbers[i], numbers[j] = numbers[j], numbers[i]
i = i + 1
j = size - 1
while(i < j):
numbers[i], numbers[j] = numbers[j], numbers[i]
i += 1
j -= 1
return True
a = [1, 2, 3, 4, 5, 6]
print a
while nextPermutation(a):
print a
| false | true |
f726d2195174ef150cf9c6dca642b46141ce4e9e | 13,720 | py | Python | demystifying/feature_extraction/mlp_feature_extractor.py | delemottelab/demystifying | e8527b52d5fbe0570cd391921ecda5aefceb797a | [
"MIT"
] | 16 | 2020-01-04T14:46:03.000Z | 2021-07-10T05:54:05.000Z | demystifying/feature_extraction/mlp_feature_extractor.py | delemottelab/demystifying | e8527b52d5fbe0570cd391921ecda5aefceb797a | [
"MIT"
] | 11 | 2020-01-10T16:18:17.000Z | 2022-03-20T09:53:33.000Z | demystifying/feature_extraction/mlp_feature_extractor.py | delemottelab/demystifying | e8527b52d5fbe0570cd391921ecda5aefceb797a | [
"MIT"
] | 3 | 2020-03-16T04:35:01.000Z | 2022-02-10T12:39:01.000Z | from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
from sklearn.neural_network import MLPClassifier, MLPRegressor
from .. import relevance_propagation as relprop
from .feature_extractor import FeatureExtractor
from ..postprocessing import PerFrameImportancePostProcessor
logger = logging.getLogger("mlp")
class MlpFeatureExtractor(FeatureExtractor):
def __init__(self,
name="MLP",
activation=relprop.relu,
randomize=True,
supervised=True,
one_vs_rest=False,
per_frame_importance_outfile=None,
per_frame_importance_samples=None,
per_frame_importance_labels=None,
classifier_kwargs={},
**kwargs):
FeatureExtractor.__init__(self,
name=name,
supervised=supervised,
**kwargs)
self.backend = "scikit-learn" # Only available option for now, more to come probably
if activation not in [relprop.relu, relprop.logistic_sigmoid]:
Exception("Relevance propagation currently only supported for relu or logistic")
self.activation = activation
self.randomize = randomize
self.classifier_kwargs = classifier_kwargs.copy()
if classifier_kwargs.get('activation', None) is not None and \
classifier_kwargs.get('activation') != self.activation:
logger.warn("Conflicting activation properiies. '%s' will be overwritten with '%s'",
classifier_kwargs.get('activation'),
self.activation)
self.classifier_kwargs['activation'] = self.activation
if not self.randomize:
self.classifier_kwargs['random_state'] = 89274
self.frame_importances = None
self.per_frame_importance_outfile = per_frame_importance_outfile
self.per_frame_importance_samples = per_frame_importance_samples
self.per_frame_importance_labels = per_frame_importance_labels
if self.use_regression:
self.one_vs_rest = False
else:
self.one_vs_rest = one_vs_rest
logger.debug("Initializing MLP with the following parameters:"
" activation function %s, randomize %s, classifier_kwargs %s,"
" per_frame_importance_outfile %s, backend %s, per_frame_importance_samples %s, one_vs_rest %s",
activation, randomize, classifier_kwargs, per_frame_importance_outfile, self.backend,
None if per_frame_importance_samples is None else per_frame_importance_samples.shape,
self.one_vs_rest)
def _train_one_vs_rest(self, data, labels):
n_clusters = labels.shape[1]
n_points = data.shape[0]
classifiers = []
for i_cluster in range(n_clusters):
classifiers.append(self._create_classifier())
binary_labels = np.zeros((n_points, 2))
binary_labels[labels[:, i_cluster] == 1, 0] = 1
binary_labels[labels[:, i_cluster] != 1, 1] = 1
classifiers[i_cluster].fit(data, binary_labels)
return classifiers
def train(self, train_set, train_labels):
"""
TODO code duplication below for on_vs_the_rest logic, refactor with KL and RF into common superclass
:param train_set:
:param train_labels:
:return:
"""
# Construct and train classifier
logger.debug("Training %s with %s samples and %s features ...", self.name, train_set.shape[0],
train_set.shape[1])
if self.one_vs_rest:
return self._train_one_vs_rest(train_set, train_labels)
else:
classifier = self._create_classifier()
classifier.fit(train_set, train_labels)
return classifier
def _normalize_relevance_per_frame(self, relevance_per_frame):
for i in range(relevance_per_frame.shape[0]):
# Not removing negative relevance in per frame analysis
# ind_negative = np.where(relevance_per_frame[i, :] < 0)[0]
# relevance_per_frame[i, ind_negative] = 0
relevance_per_frame[i, :] = (relevance_per_frame[i, :] - np.min(relevance_per_frame[i, :])) / \
(np.max(relevance_per_frame[i, :]) - np.min(relevance_per_frame[i, :]) + 1e-9)
return relevance_per_frame
def _perform_lrp(self, classifier, data, labels):
nclusters = labels.shape[1] if self.supervised else 1
nfeatures = data.shape[1]
relevance_per_cluster = np.zeros((nfeatures, nclusters))
per_frame_relevance = np.zeros(data.shape)
for c_idx in range(nclusters):
# Get all frames belonging to a cluster
if self.supervised:
frame_indices = labels[:, c_idx] == 1
cluster_data = data[frame_indices]
cluster_labels = np.zeros((len(cluster_data), nclusters))
cluster_labels[:, c_idx] = 1 # Only look at one class at the time
else:
# TODO refactor to break unsupervised code out of here. Unsupervised method have no concept of clusters/labels
cluster_labels = labels
frame_indices = [i for i in range(len(data))]
cluster_data = data
if len(cluster_data) == 0:
continue
# Now see what makes these frames belong to that class
# Time for LRP
layers = self._create_layers(classifier)
propagator = relprop.RelevancePropagator(layers)
cluster_frame_relevance = propagator.propagate(cluster_data, cluster_labels)
# Rescale relevance according to min and max relevance in each frame
cluster_frame_relevance = self._normalize_relevance_per_frame(cluster_frame_relevance)
relevance_per_cluster[:, c_idx] = cluster_frame_relevance.mean(axis=0)
per_frame_relevance[frame_indices] += cluster_frame_relevance
per_frame_relevance = self._normalize_relevance_per_frame(per_frame_relevance)
return per_frame_relevance, relevance_per_cluster
def get_feature_importance(self, classifier, data, labels):
logger.debug("Extracting feature importance using MLP ...")
if self.one_vs_rest:
return self._get_feature_importance_binaryclass(classifier, data, labels)
else:
return self._get_feature_importance_multiclass(classifier, data, labels)
def _get_feature_importance_binaryclass(self, classifiers, data, labels):
n_features = data.shape[1]
n_frames = data.shape[0]
n_states = labels.shape[1] if len(labels.shape) > 1 else 1
feature_importances = np.zeros((n_features, self.n_clusters))
for i_cluster in range(n_states):
# TODO a bit inefficent approach below where we consistenly compute LRP for all other clusters and don't use those results.
cluster_frames = labels[:, i_cluster] == 1
binary_labels = np.zeros((n_frames, 2))
binary_labels[cluster_frames, 0] = 1
binary_labels[~cluster_frames, 1] = 1
relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifiers[i_cluster], data, binary_labels)
feature_importances[:, i_cluster] = relevance_per_cluster[:, 0]
if self.per_frame_importance_outfile is not None:
cluster_frame_importances, other_labels = self._compute_frame_relevance(classifiers[i_cluster],
relevance_per_frame,
data,
labels)
if self.frame_importances is None:
self.frame_importances = np.zeros((len(other_labels), cluster_frame_importances.shape[1]))
other_cluster_frames = other_labels[:, 0] == 1
if len(other_labels[other_cluster_frames]) == 0:
# No frames in this state, just move on
continue
nclusters_per_frame = other_labels[other_cluster_frames].sum(axis=1)[:, np.newaxis]
self.frame_importances[other_cluster_frames, :] += cluster_frame_importances[
other_cluster_frames] / nclusters_per_frame
return feature_importances
def _get_feature_importance_multiclass(self, classifier, data, labels):
relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifier, data, labels)
if self.per_frame_importance_outfile is not None:
frame_importances, _ = self._compute_frame_relevance(classifier, relevance_per_frame, data, labels)
self.frame_importances = frame_importances if self.frame_importances is None else self.frame_importances + frame_importances
return relevance_per_cluster
def _compute_frame_relevance(self, classifier, relevance_per_frame, data, labels):
if self.per_frame_importance_samples is not None:
if self.indices_for_filtering is None:
other_samples = self.per_frame_importance_samples
else:
other_samples = self.per_frame_importance_samples[:, self.indices_for_filtering]
if self.per_frame_importance_labels is None:
other_labels = classifier.predict(other_samples)
else:
other_labels = self.per_frame_importance_labels
other_samples = self.scaler.transform(other_samples)
frame_relevance, _ = self._perform_lrp(classifier, other_samples, other_labels)
else:
logger.info("Using same trajectory for per frame importance as was used for training.")
if self.n_splits != 1:
logger.error(
"Cannot average frame importance to outfile if n_splits != 1. n_splits is now set to %s",
self.n_splits)
if self.shuffle_datasets:
logger.error("Data set has been shuffled, per frame importance will not be properly mapped")
frame_relevance = relevance_per_frame
other_labels = labels
# for every feature in every frame...
frame_importances = np.zeros(
(data if self.per_frame_importance_samples is None else self.per_frame_importance_samples).shape) - 1
if self.indices_for_filtering is not None:
frame_importances[:, self.indices_for_filtering] = 0
niters = self.n_iterations * self.n_splits
for frame_idx, rel in enumerate(frame_relevance):
if self.indices_for_filtering is None:
frame_importances[frame_idx] += rel / niters
else:
frame_importances[frame_idx, self.indices_for_filtering] += rel / niters
return frame_importances, other_labels
def _create_layers(self, classifier):
weights = classifier.coefs_
biases = classifier.intercepts_
layers = []
for idx, weight in enumerate(weights):
if idx == 0:
l = relprop.FirstLinear(min_val=0, max_val=1, weight=weight, bias=biases[idx])
else:
l = relprop.layer_for_string(self.activation, weight=weight, bias=biases[idx])
if l is None:
raise Exception(
"Cannot create layer at index {} for activation function {}".format(idx, self.activation))
layers.append(l)
if idx < len(weights) - 1:
# Add activation to all except output layer
activation = relprop.layer_activation_for_string(self.activation)
if activation is None:
raise Exception("Unknown activation function {}".format(self.activation))
layers.append(activation)
else:
if self.backend == 'scikit-learn':
# For scikit implementation see # https://stats.stackexchange.com/questions/243588/how-to-apply-softmax-as-activation-function-in-multi-layer-perceptron-in-scikit
# or https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/multilayer_perceptron.py
out_activation = relprop.layer_activation_for_string(classifier.out_activation_)
if out_activation is None:
raise Exception("Unknown activation function {}".format(self.activation))
layers.append(out_activation)
else:
raise Exception("Unsupported MLP backend {}".format(self.backend))
return layers
def _create_classifier(self):
return MLPRegressor(**self.classifier_kwargs) if self.use_regression \
else MLPClassifier(**self.classifier_kwargs)
def postprocessing(self, **kwargs):
return PerFrameImportancePostProcessor(extractor=self,
per_frame_importance_outfile=self.per_frame_importance_outfile,
frame_importances=self.frame_importances,
**kwargs)
| 52.769231 | 183 | 0.624344 | from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
from sklearn.neural_network import MLPClassifier, MLPRegressor
from .. import relevance_propagation as relprop
from .feature_extractor import FeatureExtractor
from ..postprocessing import PerFrameImportancePostProcessor
logger = logging.getLogger("mlp")
class MlpFeatureExtractor(FeatureExtractor):
def __init__(self,
name="MLP",
activation=relprop.relu,
randomize=True,
supervised=True,
one_vs_rest=False,
per_frame_importance_outfile=None,
per_frame_importance_samples=None,
per_frame_importance_labels=None,
classifier_kwargs={},
**kwargs):
FeatureExtractor.__init__(self,
name=name,
supervised=supervised,
**kwargs)
self.backend = "scikit-learn"
if activation not in [relprop.relu, relprop.logistic_sigmoid]:
Exception("Relevance propagation currently only supported for relu or logistic")
self.activation = activation
self.randomize = randomize
self.classifier_kwargs = classifier_kwargs.copy()
if classifier_kwargs.get('activation', None) is not None and \
classifier_kwargs.get('activation') != self.activation:
logger.warn("Conflicting activation properiies. '%s' will be overwritten with '%s'",
classifier_kwargs.get('activation'),
self.activation)
self.classifier_kwargs['activation'] = self.activation
if not self.randomize:
self.classifier_kwargs['random_state'] = 89274
self.frame_importances = None
self.per_frame_importance_outfile = per_frame_importance_outfile
self.per_frame_importance_samples = per_frame_importance_samples
self.per_frame_importance_labels = per_frame_importance_labels
if self.use_regression:
self.one_vs_rest = False
else:
self.one_vs_rest = one_vs_rest
logger.debug("Initializing MLP with the following parameters:"
" activation function %s, randomize %s, classifier_kwargs %s,"
" per_frame_importance_outfile %s, backend %s, per_frame_importance_samples %s, one_vs_rest %s",
activation, randomize, classifier_kwargs, per_frame_importance_outfile, self.backend,
None if per_frame_importance_samples is None else per_frame_importance_samples.shape,
self.one_vs_rest)
def _train_one_vs_rest(self, data, labels):
n_clusters = labels.shape[1]
n_points = data.shape[0]
classifiers = []
for i_cluster in range(n_clusters):
classifiers.append(self._create_classifier())
binary_labels = np.zeros((n_points, 2))
binary_labels[labels[:, i_cluster] == 1, 0] = 1
binary_labels[labels[:, i_cluster] != 1, 1] = 1
classifiers[i_cluster].fit(data, binary_labels)
return classifiers
def train(self, train_set, train_labels):
logger.debug("Training %s with %s samples and %s features ...", self.name, train_set.shape[0],
train_set.shape[1])
if self.one_vs_rest:
return self._train_one_vs_rest(train_set, train_labels)
else:
classifier = self._create_classifier()
classifier.fit(train_set, train_labels)
return classifier
def _normalize_relevance_per_frame(self, relevance_per_frame):
for i in range(relevance_per_frame.shape[0]):
relevance_per_frame[i, :] = (relevance_per_frame[i, :] - np.min(relevance_per_frame[i, :])) / \
(np.max(relevance_per_frame[i, :]) - np.min(relevance_per_frame[i, :]) + 1e-9)
return relevance_per_frame
def _perform_lrp(self, classifier, data, labels):
nclusters = labels.shape[1] if self.supervised else 1
nfeatures = data.shape[1]
relevance_per_cluster = np.zeros((nfeatures, nclusters))
per_frame_relevance = np.zeros(data.shape)
for c_idx in range(nclusters):
if self.supervised:
frame_indices = labels[:, c_idx] == 1
cluster_data = data[frame_indices]
cluster_labels = np.zeros((len(cluster_data), nclusters))
cluster_labels[:, c_idx] = 1
else:
cluster_labels = labels
frame_indices = [i for i in range(len(data))]
cluster_data = data
if len(cluster_data) == 0:
continue
layers = self._create_layers(classifier)
propagator = relprop.RelevancePropagator(layers)
cluster_frame_relevance = propagator.propagate(cluster_data, cluster_labels)
cluster_frame_relevance = self._normalize_relevance_per_frame(cluster_frame_relevance)
relevance_per_cluster[:, c_idx] = cluster_frame_relevance.mean(axis=0)
per_frame_relevance[frame_indices] += cluster_frame_relevance
per_frame_relevance = self._normalize_relevance_per_frame(per_frame_relevance)
return per_frame_relevance, relevance_per_cluster
def get_feature_importance(self, classifier, data, labels):
logger.debug("Extracting feature importance using MLP ...")
if self.one_vs_rest:
return self._get_feature_importance_binaryclass(classifier, data, labels)
else:
return self._get_feature_importance_multiclass(classifier, data, labels)
def _get_feature_importance_binaryclass(self, classifiers, data, labels):
n_features = data.shape[1]
n_frames = data.shape[0]
n_states = labels.shape[1] if len(labels.shape) > 1 else 1
feature_importances = np.zeros((n_features, self.n_clusters))
for i_cluster in range(n_states):
cluster_frames = labels[:, i_cluster] == 1
binary_labels = np.zeros((n_frames, 2))
binary_labels[cluster_frames, 0] = 1
binary_labels[~cluster_frames, 1] = 1
relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifiers[i_cluster], data, binary_labels)
feature_importances[:, i_cluster] = relevance_per_cluster[:, 0]
if self.per_frame_importance_outfile is not None:
cluster_frame_importances, other_labels = self._compute_frame_relevance(classifiers[i_cluster],
relevance_per_frame,
data,
labels)
if self.frame_importances is None:
self.frame_importances = np.zeros((len(other_labels), cluster_frame_importances.shape[1]))
other_cluster_frames = other_labels[:, 0] == 1
if len(other_labels[other_cluster_frames]) == 0:
# No frames in this state, just move on
continue
nclusters_per_frame = other_labels[other_cluster_frames].sum(axis=1)[:, np.newaxis]
self.frame_importances[other_cluster_frames, :] += cluster_frame_importances[
other_cluster_frames] / nclusters_per_frame
return feature_importances
def _get_feature_importance_multiclass(self, classifier, data, labels):
relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifier, data, labels)
if self.per_frame_importance_outfile is not None:
frame_importances, _ = self._compute_frame_relevance(classifier, relevance_per_frame, data, labels)
self.frame_importances = frame_importances if self.frame_importances is None else self.frame_importances + frame_importances
return relevance_per_cluster
def _compute_frame_relevance(self, classifier, relevance_per_frame, data, labels):
if self.per_frame_importance_samples is not None:
if self.indices_for_filtering is None:
other_samples = self.per_frame_importance_samples
else:
other_samples = self.per_frame_importance_samples[:, self.indices_for_filtering]
if self.per_frame_importance_labels is None:
other_labels = classifier.predict(other_samples)
else:
other_labels = self.per_frame_importance_labels
other_samples = self.scaler.transform(other_samples)
frame_relevance, _ = self._perform_lrp(classifier, other_samples, other_labels)
else:
logger.info("Using same trajectory for per frame importance as was used for training.")
if self.n_splits != 1:
logger.error(
"Cannot average frame importance to outfile if n_splits != 1. n_splits is now set to %s",
self.n_splits)
if self.shuffle_datasets:
logger.error("Data set has been shuffled, per frame importance will not be properly mapped")
frame_relevance = relevance_per_frame
other_labels = labels
# for every feature in every frame...
frame_importances = np.zeros(
(data if self.per_frame_importance_samples is None else self.per_frame_importance_samples).shape) - 1
if self.indices_for_filtering is not None:
frame_importances[:, self.indices_for_filtering] = 0
niters = self.n_iterations * self.n_splits
for frame_idx, rel in enumerate(frame_relevance):
if self.indices_for_filtering is None:
frame_importances[frame_idx] += rel / niters
else:
frame_importances[frame_idx, self.indices_for_filtering] += rel / niters
return frame_importances, other_labels
def _create_layers(self, classifier):
weights = classifier.coefs_
biases = classifier.intercepts_
layers = []
for idx, weight in enumerate(weights):
if idx == 0:
l = relprop.FirstLinear(min_val=0, max_val=1, weight=weight, bias=biases[idx])
else:
l = relprop.layer_for_string(self.activation, weight=weight, bias=biases[idx])
if l is None:
raise Exception(
"Cannot create layer at index {} for activation function {}".format(idx, self.activation))
layers.append(l)
if idx < len(weights) - 1:
# Add activation to all except output layer
activation = relprop.layer_activation_for_string(self.activation)
if activation is None:
raise Exception("Unknown activation function {}".format(self.activation))
layers.append(activation)
else:
if self.backend == 'scikit-learn':
# For scikit implementation see # https://stats.stackexchange.com/questions/243588/how-to-apply-softmax-as-activation-function-in-multi-layer-perceptron-in-scikit
# or https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/multilayer_perceptron.py
out_activation = relprop.layer_activation_for_string(classifier.out_activation_)
if out_activation is None:
raise Exception("Unknown activation function {}".format(self.activation))
layers.append(out_activation)
else:
raise Exception("Unsupported MLP backend {}".format(self.backend))
return layers
def _create_classifier(self):
return MLPRegressor(**self.classifier_kwargs) if self.use_regression \
else MLPClassifier(**self.classifier_kwargs)
def postprocessing(self, **kwargs):
return PerFrameImportancePostProcessor(extractor=self,
per_frame_importance_outfile=self.per_frame_importance_outfile,
frame_importances=self.frame_importances,
**kwargs)
| true | true |
f726d5a556fdd6e0a03f5723767785d9c1e98fa3 | 2,358 | py | Python | parallelization/collect.py | allisonChilton/Reed-Solomon | 62c367ba44940df24c7dfa23331e491f35607abc | [
"MIT"
] | null | null | null | parallelization/collect.py | allisonChilton/Reed-Solomon | 62c367ba44940df24c7dfa23331e491f35607abc | [
"MIT"
] | null | null | null | parallelization/collect.py | allisonChilton/Reed-Solomon | 62c367ba44940df24c7dfa23331e491f35607abc | [
"MIT"
] | null | null | null | import sys
import os
import subprocess
import re
import time
from dataclasses import dataclass
from typing import List
import pandas
time_reg = re.compile("Checkpoint \d: ([\d\\.]{1,})")
def run_cmd(cmd):
print(f"Running {cmd}")
proc = subprocess.run(cmd, shell=True, capture_output=True)
stdout = proc.stdout.decode()
stderr = proc.stderr.decode()
return stdout, stderr
@dataclass
class Result:
    """One benchmark run: program, checkpoint times, thread count, file size (MB)."""

    program: str
    checkpoints: List[float]
    threads: int
    filesize: float

    @property
    def encoding_time(self):
        """Elapsed time of the encoding phase (checkpoint index 2)."""
        return self.checkpoints[2]

    @property
    def decoding_time(self):
        """Elapsed time of the decoding phase (checkpoint index 4)."""
        return self.checkpoints[4]

    def asdict(self):
        """Return a flat dict view of this result for DataFrame construction.

        Bug fix: the original returned ``self.__dict__`` itself and then
        deleted the ``checkpoints`` key, which destroyed the instance
        attribute and broke ``encoding_time``/``decoding_time`` (and any
        second ``asdict`` call) afterwards.  Work on a copy instead.
        """
        d = dict(self.__dict__)
        d['encoding_time'] = self.encoding_time
        d['decoding_time'] = self.decoding_time
        del d['checkpoints']
        return d
# Benchmark driver: run every encoder/decoder variant over every input file
# and collect the parsed checkpoint timings into results.csv.
if __name__ == "__main__":
    in_dir = "../../inputs"
    inputs = sorted(os.listdir(in_dir))
    # Executables/wrappers to benchmark; "mpi.sh" takes a rank-count argument.
    program = ["mpi.sh", "baseline", "baseline-8ecc", "omp", "omp-8ecc"]
    results = []
    for p in program:
        for i in inputs:
            # "7.txt" inputs are excluded from MPI runs -- presumably too
            # large/slow for that variant; confirm with the run scripts.
            if "7.txt" in i and "mpi" in p:
                continue
            for threads in range(1,17):
                # The baseline variants are single-threaded; run them once.
                if "baseline" in p and threads > 1:
                    break
                if p == "omp":
                    os.environ['OMP_NUM_THREADS'] = str(threads)
                infile = os.path.join(in_dir,i)
                filesize = os.stat(infile).st_size / 1000000  # bytes -> MB
                # Only the MPI wrapper takes the thread/rank count on argv.
                count = f" {threads}" if "mpi" in p else ""
                stdout, stderr = run_cmd(f"./{p} {infile}{count}")
                checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
                results.append(Result(p, checkpoint_times, threads, filesize))
            if "mpi" in p:
                # Extra oversubscribed rank counts for the MPI variant only.
                for threads in [32,48,64,96]:
                    infile = os.path.join(in_dir,i)
                    filesize = os.stat(infile).st_size / 1000000
                    count = f" {threads}" if "mpi" in p else ""
                    stdout, stderr = run_cmd(f"./{p} {infile}{count}")
                    checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
                    results.append(Result(p, checkpoint_times, threads, filesize))
    df = pandas.DataFrame([x.asdict() for x in results])
    df.to_csv("results.csv")
    print(df)
| 31.026316 | 83 | 0.563189 | import sys
import os
import subprocess
import re
import time
from dataclasses import dataclass
from typing import List
import pandas
time_reg = re.compile("Checkpoint \d: ([\d\\.]{1,})")
def run_cmd(cmd):
    """Execute *cmd* in a shell, echoing it first; return (stdout, stderr) text."""
    print(f"Running {cmd}")
    proc = subprocess.run(cmd, shell=True, capture_output=True)
    return proc.stdout.decode(), proc.stderr.decode()
@dataclass
class Result:
    """One benchmark run: program, checkpoint times, thread count, file size (MB)."""

    program: str
    checkpoints: List[float]
    threads: int
    filesize: float

    @property
    def encoding_time(self):
        """Elapsed time of the encoding phase (checkpoint index 2)."""
        return self.checkpoints[2]

    @property
    def decoding_time(self):
        """Elapsed time of the decoding phase (checkpoint index 4)."""
        return self.checkpoints[4]

    def asdict(self):
        """Return a flat dict view of this result for DataFrame construction.

        Bug fix: the original returned ``self.__dict__`` itself and then
        deleted the ``checkpoints`` key, which destroyed the instance
        attribute and broke ``encoding_time``/``decoding_time`` (and any
        second ``asdict`` call) afterwards.  Work on a copy instead.
        """
        d = dict(self.__dict__)
        d['encoding_time'] = self.encoding_time
        d['decoding_time'] = self.decoding_time
        del d['checkpoints']
        return d
# Benchmark driver: run every encoder/decoder variant over every input file
# and collect the parsed checkpoint timings into results.csv.
if __name__ == "__main__":
    in_dir = "../../inputs"
    inputs = sorted(os.listdir(in_dir))
    # Executables/wrappers to benchmark; "mpi.sh" takes a rank-count argument.
    program = ["mpi.sh", "baseline", "baseline-8ecc", "omp", "omp-8ecc"]
    results = []
    for p in program:
        for i in inputs:
            # "7.txt" inputs are excluded from MPI runs -- presumably too
            # large/slow for that variant; confirm with the run scripts.
            if "7.txt" in i and "mpi" in p:
                continue
            for threads in range(1,17):
                # The baseline variants are single-threaded; run them once.
                if "baseline" in p and threads > 1:
                    break
                if p == "omp":
                    os.environ['OMP_NUM_THREADS'] = str(threads)
                infile = os.path.join(in_dir,i)
                filesize = os.stat(infile).st_size / 1000000  # bytes -> MB
                # Only the MPI wrapper takes the thread/rank count on argv.
                count = f" {threads}" if "mpi" in p else ""
                stdout, stderr = run_cmd(f"./{p} {infile}{count}")
                checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
                results.append(Result(p, checkpoint_times, threads, filesize))
            if "mpi" in p:
                # Extra oversubscribed rank counts for the MPI variant only.
                for threads in [32,48,64,96]:
                    infile = os.path.join(in_dir,i)
                    filesize = os.stat(infile).st_size / 1000000
                    count = f" {threads}" if "mpi" in p else ""
                    stdout, stderr = run_cmd(f"./{p} {infile}{count}")
                    checkpoint_times = [float(x) for x in time_reg.findall(stdout)]
                    results.append(Result(p, checkpoint_times, threads, filesize))
    df = pandas.DataFrame([x.asdict() for x in results])
    df.to_csv("results.csv")
    print(df)
| true | true |
f726d81ab8d4dbd5bfa8f4889d90ea24f3a749f0 | 6,230 | py | Python | ckanext/reclineview/tests/test_view.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 2 | 2015-07-17T19:09:52.000Z | 2017-08-30T20:23:44.000Z | ckanext/reclineview/tests/test_view.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 12 | 2015-01-19T18:03:56.000Z | 2016-04-11T16:40:33.000Z | ckanext/reclineview/tests/test_view.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 3 | 2015-03-31T06:19:42.000Z | 2016-06-27T15:32:28.000Z | import paste.fixture
import pylons.config as config
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.plugins as p
import ckan.lib.helpers as h
import ckanext.reclineview.plugin as plugin
import ckan.lib.create_test_data as create_test_data
import ckan.config.middleware as middleware
from ckan.tests import helpers, factories
class BaseTestReclineViewBase(tests.WsgiAppCase):
    # Shared fixture for the Recline view tests.  Subclasses define
    # ``view_type`` (plugin name) and ``view_class`` (plugin class).
    @classmethod
    def setup_class(cls):
        # Save the global template flag so teardown_class can restore it.
        cls.config_templates = config['ckan.legacy_templates']
        config['ckan.legacy_templates'] = 'false'
        wsgiapp = middleware.make_app(config['global_conf'], **config)
        p.load(cls.view_type)
        cls.app = paste.fixture.TestApp(wsgiapp)
        cls.p = cls.view_class()
        create_test_data.CreateTestData.create()
        cls.resource_view, cls.package, cls.resource_id = \
            _create_test_view(cls.view_type)
    @classmethod
    def teardown_class(cls):
        # Undo the global state mutated in setup_class.
        config['ckan.legacy_templates'] = cls.config_templates
        p.unload(cls.view_type)
        model.repo.rebuild_db()
    def test_can_view(self):
        # The view is offered exactly when the resource is in the datastore.
        data_dict = {'resource': {'datastore_active': True}}
        assert self.p.can_view(data_dict)
        data_dict = {'resource': {'datastore_active': False}}
        assert not self.p.can_view(data_dict)
    def test_title_description_iframe_shown(self):
        url = h.url_for(controller='package', action='resource_read',
                        id=self.package.name, resource_id=self.resource_id)
        result = self.app.get(url)
        assert self.resource_view['title'] in result
        assert self.resource_view['description'] in result
        # The data-viewer module attribute marks the embedded iframe.
        assert 'data-module="data-viewer"' in result.body
class TestReclineView(BaseTestReclineViewBase):
    view_type = 'recline_view'
    view_class = plugin.ReclineView

    def test_it_has_no_schema(self):
        """The generic Recline view advertises no configuration schema."""
        schema = self.p.info().get('schema')
        assert schema is None, schema

    def test_can_view_format_no_datastore(self):
        """Acceptable formats are viewable when datastore_active is False
        (DataProxy in use)."""
        for fmt in ('CSV', 'XLS', 'TSV', 'csv', 'xls', 'tsv'):
            resource = {'datastore_active': False, 'format': fmt}
            assert self.p.can_view({'resource': resource})

    def test_can_view_bad_format_no_datastore(self):
        """Unsupported formats are rejected when datastore_active is False."""
        for fmt in ('TXT', 'txt', 'doc', 'JSON'):
            resource = {'datastore_active': False, 'format': fmt}
            assert not self.p.can_view({'resource': resource})
class TestReclineViewDatastoreOnly(helpers.FunctionalTestBase):
    # Exercises a datastore-only resource (no uploaded file) getting the
    # recline view as its default view.
    @classmethod
    def setup_class(cls):
        # Load required plugins if a previous test left them unloaded.
        if not p.plugin_loaded('recline_view'):
            p.load('recline_view')
        if not p.plugin_loaded('datastore'):
            p.load('datastore')
        app_config = config.copy()
        app_config['ckan.legacy_templates'] = 'false'
        app_config['ckan.plugins'] = 'recline_view datastore'
        app_config['ckan.views.default_views'] = 'recline_view'
        wsgiapp = middleware.make_app(config['global_conf'], **app_config)
        cls.app = paste.fixture.TestApp(wsgiapp)
    @classmethod
    def teardown_class(cls):
        if p.plugin_loaded('recline_view'):
            p.unload('recline_view')
        if p.plugin_loaded('datastore'):
            p.unload('datastore')
    def test_create_datastore_only_view(self):
        # Create a resource purely through datastore_create ...
        dataset = factories.Dataset()
        data = {
            'resource': {'package_id': dataset['id']},
            'fields': [{'id': 'a'}, {'id': 'b'}],
            'records': [{'a': 1, 'b': 'xyz'}, {'a': 2, 'b': 'zzz'}]
        }
        result = helpers.call_action('datastore_create', **data)
        resource_id = result['resource_id']
        url = h.url_for(controller='package', action='resource_read',
                        id=dataset['id'], resource_id=resource_id)
        result = self.app.get(url)
        # ... and check the default recline view's iframe is rendered.
        assert 'data-module="data-viewer"' in result.body
class TestReclineGridView(BaseTestReclineViewBase):
    view_type = 'recline_grid_view'
    view_class = plugin.ReclineGridView

    def test_it_has_no_schema(self):
        """The grid view advertises no configuration schema."""
        info_schema = self.p.info().get('schema')
        assert info_schema is None, info_schema
class TestReclineGraphView(BaseTestReclineViewBase):
    view_type = 'recline_graph_view'
    view_class = plugin.ReclineGraphView

    def test_it_has_the_correct_schema_keys(self):
        """The graph view schema exposes exactly the expected options."""
        info_schema = self.p.info().get('schema')
        _assert_schema_exists_and_has_keys(
            info_schema, ['offset', 'limit', 'graph_type', 'group', 'series'])
class TestReclineMapView(BaseTestReclineViewBase):
    view_type = 'recline_map_view'
    view_class = plugin.ReclineMapView

    def test_it_has_the_correct_schema_keys(self):
        """The map view schema exposes exactly the expected options."""
        info_schema = self.p.info().get('schema')
        _assert_schema_exists_and_has_keys(
            info_schema,
            ['offset', 'limit', 'map_field_type', 'latitude_field',
             'longitude_field', 'geojson_field', 'auto_zoom',
             'cluster_markers'])
def _create_test_view(view_type):
    """Create a resource view of *view_type* on the 'annakarenina' fixture.

    Returns (resource_view, package, resource_id) for use in assertions.
    """
    context = {'model': model,
               'session': model.Session,
               'user': model.User.get('testsysadmin').name}
    package = model.Package.get('annakarenina')
    resource_id = package.resources[1].id
    resource_view = {'resource_id': resource_id,
                     'view_type': view_type,
                     'title': u'Test View',
                     'description': u'A nice test view'}
    resource_view = p.toolkit.get_action('resource_view_create')(
        context, resource_view)
    return resource_view, package, resource_id
def _assert_schema_exists_and_has_keys(schema, expected_keys):
assert schema is not None, schema
keys = schema.keys()
keys.sort()
expected_keys.sort()
assert keys == expected_keys, '%s != %s' % (keys, expected_keys)
| 34.804469 | 78 | 0.643499 | import paste.fixture
import pylons.config as config
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.plugins as p
import ckan.lib.helpers as h
import ckanext.reclineview.plugin as plugin
import ckan.lib.create_test_data as create_test_data
import ckan.config.middleware as middleware
from ckan.tests import helpers, factories
class BaseTestReclineViewBase(tests.WsgiAppCase):
    # Shared fixture for the Recline view tests.  Subclasses define
    # ``view_type`` (plugin name) and ``view_class`` (plugin class).
    @classmethod
    def setup_class(cls):
        # Save the global template flag so teardown_class can restore it.
        cls.config_templates = config['ckan.legacy_templates']
        config['ckan.legacy_templates'] = 'false'
        wsgiapp = middleware.make_app(config['global_conf'], **config)
        p.load(cls.view_type)
        cls.app = paste.fixture.TestApp(wsgiapp)
        cls.p = cls.view_class()
        create_test_data.CreateTestData.create()
        cls.resource_view, cls.package, cls.resource_id = \
            _create_test_view(cls.view_type)
    @classmethod
    def teardown_class(cls):
        # Undo the global state mutated in setup_class.
        config['ckan.legacy_templates'] = cls.config_templates
        p.unload(cls.view_type)
        model.repo.rebuild_db()
    def test_can_view(self):
        # The view is offered exactly when the resource is in the datastore.
        data_dict = {'resource': {'datastore_active': True}}
        assert self.p.can_view(data_dict)
        data_dict = {'resource': {'datastore_active': False}}
        assert not self.p.can_view(data_dict)
    def test_title_description_iframe_shown(self):
        url = h.url_for(controller='package', action='resource_read',
                        id=self.package.name, resource_id=self.resource_id)
        result = self.app.get(url)
        assert self.resource_view['title'] in result
        assert self.resource_view['description'] in result
        # The data-viewer module attribute marks the embedded iframe.
        assert 'data-module="data-viewer"' in result.body
class TestReclineView(BaseTestReclineViewBase):
    view_type = 'recline_view'
    view_class = plugin.ReclineView

    def test_it_has_no_schema(self):
        """The generic Recline view advertises no configuration schema."""
        schema = self.p.info().get('schema')
        assert schema is None, schema

    def test_can_view_format_no_datastore(self):
        """Acceptable formats are viewable when datastore_active is False."""
        for fmt in ('CSV', 'XLS', 'TSV', 'csv', 'xls', 'tsv'):
            resource = {'datastore_active': False, 'format': fmt}
            assert self.p.can_view({'resource': resource})

    def test_can_view_bad_format_no_datastore(self):
        """Unsupported formats are rejected when datastore_active is False."""
        for fmt in ('TXT', 'txt', 'doc', 'JSON'):
            resource = {'datastore_active': False, 'format': fmt}
            assert not self.p.can_view({'resource': resource})
class TestReclineViewDatastoreOnly(helpers.FunctionalTestBase):
    # Exercises a datastore-only resource (no uploaded file) getting the
    # recline view as its default view.
    @classmethod
    def setup_class(cls):
        # Load required plugins if a previous test left them unloaded.
        if not p.plugin_loaded('recline_view'):
            p.load('recline_view')
        if not p.plugin_loaded('datastore'):
            p.load('datastore')
        app_config = config.copy()
        app_config['ckan.legacy_templates'] = 'false'
        app_config['ckan.plugins'] = 'recline_view datastore'
        app_config['ckan.views.default_views'] = 'recline_view'
        wsgiapp = middleware.make_app(config['global_conf'], **app_config)
        cls.app = paste.fixture.TestApp(wsgiapp)
    @classmethod
    def teardown_class(cls):
        if p.plugin_loaded('recline_view'):
            p.unload('recline_view')
        if p.plugin_loaded('datastore'):
            p.unload('datastore')
    def test_create_datastore_only_view(self):
        # Create a resource purely through datastore_create ...
        dataset = factories.Dataset()
        data = {
            'resource': {'package_id': dataset['id']},
            'fields': [{'id': 'a'}, {'id': 'b'}],
            'records': [{'a': 1, 'b': 'xyz'}, {'a': 2, 'b': 'zzz'}]
        }
        result = helpers.call_action('datastore_create', **data)
        resource_id = result['resource_id']
        url = h.url_for(controller='package', action='resource_read',
                        id=dataset['id'], resource_id=resource_id)
        result = self.app.get(url)
        # ... and check the default recline view's iframe is rendered.
        assert 'data-module="data-viewer"' in result.body
class TestReclineGridView(BaseTestReclineViewBase):
    view_type = 'recline_grid_view'
    view_class = plugin.ReclineGridView

    def test_it_has_no_schema(self):
        """The grid view advertises no configuration schema."""
        info_schema = self.p.info().get('schema')
        assert info_schema is None, info_schema
class TestReclineGraphView(BaseTestReclineViewBase):
    view_type = 'recline_graph_view'
    view_class = plugin.ReclineGraphView

    def test_it_has_the_correct_schema_keys(self):
        """The graph view schema exposes exactly the expected options."""
        info_schema = self.p.info().get('schema')
        _assert_schema_exists_and_has_keys(
            info_schema, ['offset', 'limit', 'graph_type', 'group', 'series'])
class TestReclineMapView(BaseTestReclineViewBase):
    view_type = 'recline_map_view'
    view_class = plugin.ReclineMapView

    def test_it_has_the_correct_schema_keys(self):
        """The map view schema exposes exactly the expected options."""
        info_schema = self.p.info().get('schema')
        _assert_schema_exists_and_has_keys(
            info_schema,
            ['offset', 'limit', 'map_field_type', 'latitude_field',
             'longitude_field', 'geojson_field', 'auto_zoom',
             'cluster_markers'])
def _create_test_view(view_type):
    """Create a resource view of *view_type* on the 'annakarenina' fixture.

    Returns (resource_view, package, resource_id) for use in assertions.
    """
    context = {'model': model,
               'session': model.Session,
               'user': model.User.get('testsysadmin').name}
    package = model.Package.get('annakarenina')
    resource_id = package.resources[1].id
    resource_view = {'resource_id': resource_id,
                     'view_type': view_type,
                     'title': u'Test View',
                     'description': u'A nice test view'}
    resource_view = p.toolkit.get_action('resource_view_create')(
        context, resource_view)
    return resource_view, package, resource_id
def _assert_schema_exists_and_has_keys(schema, expected_keys):
assert schema is not None, schema
keys = schema.keys()
keys.sort()
expected_keys.sort()
assert keys == expected_keys, '%s != %s' % (keys, expected_keys)
| true | true |
f726d92eda80cb6386391bf319320971dd446ebc | 3,824 | py | Python | Algorithmic Methods of Data Mining/Final_project/graph_partitioning1.py | JayWu7/Machine-Learning-Courses-Study-Record | 7586c3429514bc21c7cfe42f85ca8c0fcf8f072b | [
"Apache-2.0"
] | 1 | 2019-12-04T12:03:11.000Z | 2019-12-04T12:03:11.000Z | Algorithmic Methods of Data Mining/Final_project/graph_partitioning1.py | JayWu7/Machine-Learning-Courses-Study-Record | 7586c3429514bc21c7cfe42f85ca8c0fcf8f072b | [
"Apache-2.0"
] | null | null | null | Algorithmic Methods of Data Mining/Final_project/graph_partitioning1.py | JayWu7/Machine-Learning-Courses-Study-Record | 7586c3429514bc21c7cfe42f85ca8c0fcf8f072b | [
"Apache-2.0"
] | 1 | 2019-11-18T11:20:58.000Z | 2019-11-18T11:20:58.000Z | import numpy as np
from sklearn.cluster import KMeans
import time
from scipy.sparse.linalg import eigs
from scipy.sparse import csr_matrix
class Graph:
    # Spectral graph partitioning: load edges, build a normalised Laplacian,
    # embed with the first k eigenvectors and cluster with k-means.
    def __init__(self, data_name):
        self.filename = data_name
        self.n = None
        self.k = None
        self.edges = self.form_graph()
        # self.e = None # number of edges
        self.adj = None # adjacency list
        self.lap = None
        self.U = None
        self.labels = None
    def form_graph(self):
        '''
        Lazily read the graph from ./data/<filename>.
        First yields (n, k) parsed from the header line, then one
        (source, target) pair per edge line.
        :return: generator of (n, k) followed by edge tuples
        '''
        with open('./data/{}'.format(self.filename), 'r') as f:
            first_line = f.readline()[:-1]  # remove '\n' at the end
            meta = first_line.split(' ')
            # header fields 2 and -1 hold n (vertices) and k (clusters) --
            # assumed from usage below; confirm against the data format.
            yield int(meta[2]), int(meta[-1])
            for i, edge in enumerate(f.readlines()):
                s, t = edge[:-1].split(' ')
                yield int(s), int(t)
    def generate_adj(self):
        '''
        Consume the edge generator and build the adjacency structure.
        Also sets self.n and self.k from the header tuple.
        :return: adjacency list -- one set of neighbours per vertex
        '''
        a = time.time()
        self.n, self.k = next(self.edges)
        adj = [set() for _ in range(self.n)]
        for s, t in self.edges:
            # undirected: record both directions
            adj[s].add(t)
            adj[t].add(s)
        b = time.time()
        print('Generate adjacency matrix cost: {}s'.format(b-a))
        return adj
    def generate_lap(self):
        '''
        Build the Laplacian L = D - A row by row from self.adj,
        then normalise it by its Frobenius norm, storing it in self.lap.
        NOTE: allocates a dense n x n array -- infeasible for very
        large graphs.
        '''
        a = time.time()
        self.lap = np.ndarray((self.n, self.n))
        for i, row in enumerate(self.adj):
            row_dia = np.zeros(self.n)
            row_dia[i] = len(row)  # degree on the diagonal
            row_adj = [1 if j in row else 0 for j in range(self.n)]
            self.lap[i] = row_dia - row_adj
        x = np.linalg.norm(self.lap)
        self.lap = self.lap / x
        b = time.time()
        print('Genearte Laplacian matrix cost: {}s'.format(b-a))
    def get_U(self):
        '''
        Use scipy.sparse.linalg.eigs to compute the k eigenvectors of the
        Laplacian closest to sigma=0 (smallest eigenvalues), which form the
        spectral embedding U fed to k-means.
        :return: normalised matrix U (n x k)
        '''
        s = time.time()
        self.lap = csr_matrix(self.lap)
        _, first_k = eigs(self.lap, self.k, sigma=0)
        U = first_k.real  # eigs returns complex; imaginary parts are dropped
        # normalize U
        x = np.linalg.norm(U)
        U = U / x
        t = time.time()
        print('Generate U cost: {}s'.format(t - s))
        return U
    def k_means(self):
        '''
        Cluster the embedded points self.U into self.k groups.
        :return: array of cluster labels, one per vertex
        '''
        s = time.time()
        kmeans = KMeans(n_clusters=self.k, algorithm='auto')
        kmeans.fit(self.U)
        t = time.time()
        print('Run k-means algorithm cost: {}s'.format(t - s))
        return kmeans.labels_
    def write_clusters(self):
        '''
        Write one "<vertex> <label>" line per vertex to
        ./result/<filename-stem>_res.txt.
        '''
        with open('./result/{}_res.txt'.format(self.filename[:-4]), 'w') as f:
            for i, l in enumerate(self.labels):
                f.write('{} {}\n'.format(i, l))
    def main(self):
        # Full pipeline: adjacency -> Laplacian -> embedding -> k-means -> file.
        self.adj = self.generate_adj()
        self.generate_lap()
        self.U = self.get_U()
        self.labels = self.k_means()
        self.write_clusters()
# Entry point: partition the SNAP soc-Epinions1 graph end to end.
if __name__ == '__main__':
    graph = Graph('soc-Epinions1.txt')
    graph.main()
| 30.110236 | 94 | 0.537918 | import numpy as np
from sklearn.cluster import KMeans
import time
from scipy.sparse.linalg import eigs
from scipy.sparse import csr_matrix
class Graph:
    """Spectral graph partitioning: load edges, build a normalised Laplacian,
    embed with the first k eigenvectors and cluster with k-means."""

    def __init__(self, data_name):
        self.filename = data_name
        self.n = None       # number of vertices (read from the file header)
        self.k = None       # number of clusters (read from the file header)
        self.edges = self.form_graph()
        # Bug fix: this line was corrupted to a bare "= None" (a syntax
        # error); restore the adjacency-list attribute.
        self.adj = None
        self.lap = None     # normalised Laplacian matrix
        self.U = None       # spectral embedding used by k-means
        self.labels = None  # cluster label per vertex

    def form_graph(self):
        """Lazily yield (n, k) from the header line, then one (src, dst)
        tuple per edge line of ./data/<filename>."""
        with open('./data/{}'.format(self.filename), 'r') as f:
            first_line = f.readline()[:-1]  # strip trailing '\n'
            meta = first_line.split(' ')
            yield int(meta[2]), int(meta[-1])
            # Iterate the file directly instead of readlines(): same
            # behaviour without loading the whole edge list into memory.
            for edge in f:
                s, t = edge[:-1].split(' ')
                yield int(s), int(t)

    def generate_adj(self):
        """Consume the edge generator; set self.n/self.k and return the
        adjacency list (one neighbour set per vertex)."""
        a = time.time()
        self.n, self.k = next(self.edges)
        adj = [set() for _ in range(self.n)]
        for s, t in self.edges:
            # undirected: record both directions
            adj[s].add(t)
            adj[t].add(s)
        b = time.time()
        print('Generate adjacency matrix cost: {}s'.format(b-a))
        return adj

    def generate_lap(self):
        """Build L = D - A from the adjacency list and normalise it by its
        Frobenius norm, storing the result in self.lap (dense n x n)."""
        a = time.time()
        self.lap = np.ndarray((self.n, self.n))
        for i, row in enumerate(self.adj):
            row_dia = np.zeros(self.n)
            row_dia[i] = len(row)  # degree on the diagonal
            row_adj = [1 if j in row else 0 for j in range(self.n)]
            self.lap[i] = row_dia - row_adj
        x = np.linalg.norm(self.lap)
        self.lap = self.lap / x
        b = time.time()
        # Typo fix: the original message read 'Genearte'.
        print('Generate Laplacian matrix cost: {}s'.format(b-a))

    def get_U(self):
        """Return the normalised matrix of the k eigenvectors of the
        Laplacian nearest sigma=0 (the spectral embedding)."""
        s = time.time()
        self.lap = csr_matrix(self.lap)
        _, first_k = eigs(self.lap, self.k, sigma=0)
        U = first_k.real  # eigs returns complex; drop imaginary parts
        x = np.linalg.norm(U)
        U = U / x
        t = time.time()
        print('Generate U cost: {}s'.format(t - s))
        return U

    def k_means(self):
        """Cluster the embedded points self.U into self.k groups and
        return the label array."""
        s = time.time()
        kmeans = KMeans(n_clusters=self.k, algorithm='auto')
        kmeans.fit(self.U)
        t = time.time()
        print('Run k-means algorithm cost: {}s'.format(t - s))
        return kmeans.labels_

    def write_clusters(self):
        """Write one '<vertex> <label>' line per vertex to ./result/."""
        with open('./result/{}_res.txt'.format(self.filename[:-4]), 'w') as f:
            for i, l in enumerate(self.labels):
                f.write('{} {}\n'.format(i, l))

    def main(self):
        """Run the pipeline: adjacency -> Laplacian -> embedding -> k-means."""
        self.adj = self.generate_adj()
        self.generate_lap()
        self.U = self.get_U()
        self.labels = self.k_means()
        self.write_clusters()
# Entry point: partition the SNAP soc-Epinions1 graph end to end.
if __name__ == '__main__':
    graph = Graph('soc-Epinions1.txt')
    graph.main()
| true | true |
f726d9f05387af7ecf63d8618efca4e9f2591141 | 1,539 | py | Python | python/test/crawl_stocks/crawlstocks/spiders/GuchengBlockCodes.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | 2 | 2017-06-07T03:20:42.000Z | 2020-01-07T09:14:26.000Z | python/test/crawl_stocks/crawlstocks/spiders/GuchengBlockCodes.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | null | null | null | python/test/crawl_stocks/crawlstocks/spiders/GuchengBlockCodes.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import scrapy
from crawlstocks.items import GuchengStockCodeItem
class GuchengblockcodesSpider(scrapy.Spider):
    """Crawl stock names/codes for one Gucheng market block, following
    pagination.

    Fixes: (1) ``__init__`` now calls ``super().__init__`` so scrapy's base
    spider initialisation still runs, and (2) a fresh item is created per
    result row -- the original reused one item instance and kept mutating
    it after yielding, so downstream consumers holding a reference could
    observe later rows' values.
    """

    name = 'GuchengBlockCodes'
    allowed_domains = ['hq.gucheng.com']
    custom_settings = {
        'ITEM_PIPELINES' : {'crawlstocks.pipelines.file.GuchengCrawlListPipeline':200}
    }

    # Block name -> Gucheng block id.  'xiongan' = Xiong'an New Area,
    # 'jingjinyi' = Beijing-Tianjin-Hebei integration.
    _BLOCK_IDS = {'xiongan': '003813', 'jingjinyi': '003684'}

    def __init__(self, blockname='xiongan', *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Unknown block names fall back to Xiong'an, as before.
        blid = self._BLOCK_IDS.get(blockname, '003813')
        self.start_urls = ['https://hq.gucheng.com/blockInfo/' + blid + '/']

    def parse(self, response):
        # Row shape: <td class="stock_phone stock_textLeft">
        #              <a href="/SZ300353/" target="_blank">name</a></td>
        for css in response.css('tbody tr td.stock_phone.stock_textLeft a'):
            item = GuchengStockCodeItem()  # new item per row (see class doc)
            item['name'] = re.sub(r'\s+', '', css.xpath('./text()').get())
            item['code'] = css.xpath('./@href').get()[1:-1]
            yield item
        # CSS text matching does not work here; use an XPath text()
        # predicate to find the "next page" link.
        next_page = response.xpath('//div[contains(@class, \
            "stock_page")]/span/a[contains(.//text(), "下一页")]/@href').get()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)
| 38.475 | 100 | 0.578298 |
import re
import scrapy
from crawlstocks.items import GuchengStockCodeItem
class GuchengblockcodesSpider(scrapy.Spider):
    """Crawl stock names/codes for one Gucheng market block, following
    pagination.

    Fixes: (1) ``__init__`` now calls ``super().__init__`` so scrapy's base
    spider initialisation still runs, and (2) a fresh item is created per
    result row -- the original reused one item instance and kept mutating
    it after yielding, so downstream consumers holding a reference could
    observe later rows' values.
    """

    name = 'GuchengBlockCodes'
    allowed_domains = ['hq.gucheng.com']
    custom_settings = {
        'ITEM_PIPELINES' : {'crawlstocks.pipelines.file.GuchengCrawlListPipeline':200}
    }

    # Block name -> Gucheng block id.  'xiongan' = Xiong'an New Area,
    # 'jingjinyi' = Beijing-Tianjin-Hebei integration.
    _BLOCK_IDS = {'xiongan': '003813', 'jingjinyi': '003684'}

    def __init__(self, blockname='xiongan', *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Unknown block names fall back to Xiong'an, as before.
        blid = self._BLOCK_IDS.get(blockname, '003813')
        self.start_urls = ['https://hq.gucheng.com/blockInfo/' + blid + '/']

    def parse(self, response):
        for css in response.css('tbody tr td.stock_phone.stock_textLeft a'):
            item = GuchengStockCodeItem()  # new item per row (see class doc)
            item['name'] = re.sub(r'\s+', '', css.xpath('./text()').get())
            item['code'] = css.xpath('./@href').get()[1:-1]
            yield item
        # XPath text() predicate finds the "next page" link.
        next_page = response.xpath('//div[contains(@class, \
            "stock_page")]/span/a[contains(.//text(), "下一页")]/@href').get()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)
| true | true |
f726da1790877622a36dac64245198de83414f60 | 2,545 | py | Python | bigml/tests/create_projection_steps.py | devs-cloud/python_ml | 05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c | [
"Apache-2.0"
] | null | null | null | bigml/tests/create_projection_steps.py | devs-cloud/python_ml | 05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c | [
"Apache-2.0"
] | null | null | null | bigml/tests/create_projection_steps.py | devs-cloud/python_ml | 05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2018-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
from nose.tools import assert_almost_equals, eq_, assert_is_not_none
from datetime import datetime, timedelta
from world import world
from bigml.api import HTTP_CREATED
from bigml.api import FINISHED, FAULTY
from bigml.api import get_status
from read_projection_steps import i_get_the_projection
def i_create_a_projection(step, data=None):
    """Create a projection from the stored PCA and track it in ``world``."""
    payload = json.loads(data) if data is not None else {}
    pca = world.pca['resource']
    resource = world.api.create_projection(pca, payload)
    world.status = resource['code']
    eq_(world.status, HTTP_CREATED)
    world.location = resource['location']
    world.projection = resource['object']
    world.projections.append(resource['resource'])
def the_projection_is(step, projection):
    """Check the stored projection result matches the expected JSON mapping."""
    expected = json.loads(projection if projection is not None else "{}")
    result = world.projection['projection']['result']
    eq_(len(expected.keys()), len(result.keys()))
    for name in expected:
        eq_(result[name], expected[name],
            "remote: %s, %s - expected: %s" % (
                name, result[name], expected[name]))
def wait_until_projection_status_code_is(step, code1, code2, secs):
    """Poll the projection until its status is *code1* or *code2*, then
    assert it finished as *code1*.

    Times out after ``secs`` (scaled by ``world.delta``) seconds.
    Bug fix: the original called ``assert_less``, which is never imported
    in this module, so the first slow poll raised ``NameError`` instead of
    a timeout failure -- replaced with a plain assert.
    """
    start = datetime.utcnow()
    delta = int(secs) * world.delta
    i_get_the_projection(step, world.projection['resource'])
    status = get_status(world.projection)
    while (status['code'] != int(code1) and
           status['code'] != int(code2)):
        time.sleep(3)
        assert (datetime.utcnow() - start).seconds < delta, \
            "Timed out waiting for the projection status"
        i_get_the_projection(step, world.projection['resource'])
        status = get_status(world.projection)
    eq_(status['code'], int(code1))
def the_projection_is_finished_in_less_than(step, secs):
    """Wait until the projection is FINISHED (or FAULTY) within *secs* seconds."""
    wait_until_projection_status_code_is(step, FINISHED, FAULTY, secs)
| 35.84507 | 77 | 0.703733 |
import json
import time
from nose.tools import assert_almost_equals, eq_, assert_is_not_none
from datetime import datetime, timedelta
from world import world
from bigml.api import HTTP_CREATED
from bigml.api import FINISHED, FAULTY
from bigml.api import get_status
from read_projection_steps import i_get_the_projection
def i_create_a_projection(step, data=None):
    """Create a projection from the stored PCA and track it in ``world``."""
    payload = json.loads(data) if data is not None else {}
    pca = world.pca['resource']
    resource = world.api.create_projection(pca, payload)
    world.status = resource['code']
    eq_(world.status, HTTP_CREATED)
    world.location = resource['location']
    world.projection = resource['object']
    world.projections.append(resource['resource'])
def the_projection_is(step, projection):
    """Check the stored projection result matches the expected JSON mapping."""
    expected = json.loads(projection if projection is not None else "{}")
    result = world.projection['projection']['result']
    eq_(len(expected.keys()), len(result.keys()))
    for name in expected:
        eq_(result[name], expected[name],
            "remote: %s, %s - expected: %s" % (
                name, result[name], expected[name]))
def wait_until_projection_status_code_is(step, code1, code2, secs):
    """Poll the projection until its status is *code1* or *code2*, then
    assert it finished as *code1*.

    Times out after ``secs`` (scaled by ``world.delta``) seconds.
    Bug fix: the original called ``assert_less``, which is never imported
    in this module, so the first slow poll raised ``NameError`` instead of
    a timeout failure -- replaced with a plain assert.
    """
    start = datetime.utcnow()
    delta = int(secs) * world.delta
    i_get_the_projection(step, world.projection['resource'])
    status = get_status(world.projection)
    while (status['code'] != int(code1) and
           status['code'] != int(code2)):
        time.sleep(3)
        assert (datetime.utcnow() - start).seconds < delta, \
            "Timed out waiting for the projection status"
        i_get_the_projection(step, world.projection['resource'])
        status = get_status(world.projection)
    eq_(status['code'], int(code1))
def the_projection_is_finished_in_less_than(step, secs):
    """Wait until the projection is FINISHED (or FAULTY) within *secs* seconds."""
    wait_until_projection_status_code_is(step, FINISHED, FAULTY, secs)
| true | true |
f726da9544773e11f11ee7b9f04bc69fd7f46c4b | 8,615 | py | Python | EOD_api/test_EOD_api.py | webclinic017/time-series-pipeline | 5ac418b91e395a48cba397f95d25d221adfff9bd | [
"MIT"
] | 3 | 2021-08-28T10:55:12.000Z | 2021-12-01T20:42:38.000Z | EOD_api/test_EOD_api.py | webclinic017/time-series-pipeline | 5ac418b91e395a48cba397f95d25d221adfff9bd | [
"MIT"
] | null | null | null | EOD_api/test_EOD_api.py | webclinic017/time-series-pipeline | 5ac418b91e395a48cba397f95d25d221adfff9bd | [
"MIT"
] | 1 | 2021-09-26T16:07:24.000Z | 2021-09-26T16:07:24.000Z | import os
import re
import datetime
import unittest
from io import StringIO
from unittest.mock import patch
import pandas as pd
import EOD_api as eod
TOKEN = os.environ["EOD_TOKEN"]
def date_parser(string):
    """Replace the single space after each ``YYYY-MM-DD`` date with a ``T``,
    turning whitespace-separated date/time columns into ISO-8601-style
    ``YYYY-MM-DDTHH:MM:SS`` tokens."""
    pattern = re.compile(r"([0-9]{4}-[0-9]{2}-[0-9]{2}) ")
    return pattern.sub(r"\1T", string)
class TestGetEod(unittest.TestCase):
    # Integration-style tests for the EOD_api wrappers; network calls use
    # the real token from the environment except where pd.read_csv is mocked.
    # @classmethod
    # def setUp(cls):
    #     pass
    # def tearDown(cls):
    #     pass
    def test_idempotent__addtickers(self):
        # Adding the same ticker twice must equal adding it once.
        d1 = eod.OhlcvIntraday(
            ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
        ).add_tickers(["MSFT.US"])
        d2 = (
            eod.OhlcvIntraday(
                ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
            )
            .add_tickers(["MSFT.US"])
            .add_tickers(["MSFT.US"])
        )
        self.assertEqual(d1, d2)
    def test_idempotent_truncate_dates(self):
        # Truncating to the same window twice must equal truncating once.
        d1 = eod.Fundamental(
            ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
        ).truncate_dates("2020-10-14", "2020-10-16")
        d2 = (
            eod.Fundamental(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17")
            .truncate_dates("2020-10-14", "2020-10-16")
            .truncate_dates("2020-10-14", "2020-10-16")
        )
        self.assertEqual(d1, d2)
    def test_idempotent_remove_tickers(self):
        # Removing a ticker twice must equal removing it once.
        d1 = eod.Fundamental(
            ["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17"
        ).remove_tickers(["MSFT.US"])
        d2 = (
            eod.Fundamental(["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17")
            .remove_tickers(["MSFT.US"])
            .remove_tickers(["MSFT.US"])
        )
        self.assertEqual(d1, d2)
    def test_add_remove(self):
        # Adding then removing a ticker must be a no-op.
        d1 = eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
        d2 = (
            eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
            .add_tickers(["MSFT.US"])
            .remove_tickers(["MSFT.US"])
        )
        self.assertEqual(d1, d2)
    def test_remove_all_tickers(self):
        # Retrieving data with an empty ticker list must raise.
        with self.assertRaises(Exception):
            eod.Ohlcv(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17").remove_tickers(
                ["AAPL.US"]
            ).retrieve_data()
    def test_misspelled_input(self):
        # A misspelled intraday frequency must raise.
        with self.assertRaises(Exception):
            eod.OhlcvIntraday(
                ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="Daoly"
            )
    def test_ohlcv_data_format_hasnt_changed(
        self,
    ):  # compares data before formatting against the expected fixture
        expected_aapl = pd.read_csv(
            StringIO(
                """
                        Date     Open      High     Low   Close  Adjusted_close       Volume
            2020-10-13  125.27  125.390  119.65  121.10        120.7110  262330500.0
            2020-10-14  121.00  123.030  119.62  121.19        120.8008  151062297.0
            2020-10-15  118.72  121.200  118.15  120.71        120.3223  112559203.0
            2020-10-16  121.28  121.548  118.81  119.02        118.6377  115393797.0
            275 NaN NaN NaN NaN NaN NaN
            """
            ),
            sep="\\s+",
        )
        url = "https://eodhistoricaldata.com/api/eod/AAPL.US?api_token={}&from=2020-10-13&to=2020-10-17&period=d".format(
            TOKEN
        )
        actual = pd.read_csv(
            url,
            usecols=[
                "Date",
                "Volume",
                "Open",
                "Close",
                "High",
                "Low",
                "Adjusted_close",
            ],
        )
        # Mock the network fetch so "expected" comes from the fixture above.
        with patch.object(pd, "read_csv") as mock_read:
            mock_read.autospec = True
            mock_read.return_value = expected_aapl
            expected = pd.read_csv(
                url,
                usecols=[
                    "Date",
                    "Volume",
                    "Open",
                    "Close",
                    "High",
                    "Low",
                    "Adjusted_close",
                ],
            )
            pd.testing.assert_frame_equal(actual, expected, rtol=5e-3)
    def test_index_formatting(self):
        # Raw fixture as the API would return it ...
        expected_aapl = pd.read_csv(
            StringIO(
                """
                        Date     Open      High     Low   Close  Adjusted_close       Volume
            2020-10-13  125.27  125.390  119.65  121.10        120.7110  262330500.0
            2020-10-14  121.00  123.030  119.62  121.19        120.8008  151062297.0
            2020-10-15  118.72  121.200  118.15  120.71        120.3223  112559203.0
            2020-10-16  121.28  121.548  118.81  119.02        118.6377  115393797.0
            275 NaN NaN NaN NaN NaN NaN
            """
            ),
            sep="\\s+",
        )
        # ... and the same data after Ohlcv's (Stock, Date) index formatting.
        expected_aapl_formatted = pd.read_csv(
            StringIO(
                date_parser(
                    """
                Stock    Date                       Open     High     Low   Close  Adjusted_close       Volume
                AAPL.US  2020-10-13 00:00:00+00:00  125.27  125.390  119.65  121.10        120.7110  262330500.0
                AAPL.US  2020-10-14 00:00:00+00:00  121.00  123.030  119.62  121.19        120.8008  151062297.0
                AAPL.US  2020-10-15 00:00:00+00:00  118.72  121.200  118.15  120.71        120.3223  112559203.0
                AAPL.US  2020-10-16 00:00:00+00:00  121.28  121.548  118.81  119.02        118.6377  115393797.0
                """
                )
            ),
            sep="\\s+",
            index_col=[0, 1],
            converters={"Date": lambda col: datetime.datetime.fromisoformat(col)},
        )
        with patch.object(pd, "read_csv") as mock_read:
            mock_read.autospec = True
            mock_read.return_value = expected_aapl
            formatted_mock = eod.Ohlcv(
                ["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
            ).retrieve_data()
            pd.testing.assert_frame_equal(
                formatted_mock, expected_aapl_formatted, rtol=5e-3
            )
# TODO? Write more tests:
# Check that the data is concated/merged/joined properly, particularly when the indexes come with Nans
# Check except clauses
# Check duplicate df values
# Assert errors with wrong args
# etc
# expected_ohlcv_concatted = pd.read_csv( StringIO( date_parser( """
# Stock Date Gmtoffset Datetime Open High Low Close Volume Returns
# BP.LSE 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# BP.LSE 2020-10-14 00:00:00+00:00 0.0 2020-10-13 15:25:00 213.649993 214.000000 213.550003 213.856994 1210380.0 -0.001601
# BP.LSE 2020-10-15 00:00:00+00:00 0.0 2020-10-14 15:25:00 213.000000 213.149993 212.600006 212.649993 1182246.0 0.019660
# BP.LSE 2020-10-16 00:00:00+00:00 0.0 2020-10-15 15:25:00 207.149993 207.199996 206.500000 206.850006 1626720.0 -0.013826
# AAPL.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# AAPL.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 121.139999 121.279998 121.029998 121.050003 4585723.0 0.003648
# AAPL.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 121.580001 121.709999 121.139999 121.180000 3420583.0 0.015419
# AAPL.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 120.790000 120.849998 120.580001 120.699996 3436603.0 -0.003550
# MSFT.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# MSFT.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 223.320007 223.389999 222.750000 222.830001 1457493.0 0.000651
# MSFT.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 221.199996 221.414993 220.600006 220.759994 1122912.0 0.012377
# MSFT.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 219.639999 219.880004 219.490005 219.660003 1201342.0 -0.003900
# """ ) ), sep="\\s+", index_col=[0,1,2], converters = {'Date' : lambda col: datetime.datetime.fromisoformat( col ) \
# , 'Datetime' : lambda col: pd.to_datetime(col, format='%Y-%m-%dT%H:%M:%S', utc=True) } )
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| 43.075 | 165 | 0.51863 | import os
import re
import datetime
import unittest
from io import StringIO
from unittest.mock import patch
import pandas as pd
import EOD_api as eod
TOKEN = os.environ["EOD_TOKEN"]
def date_parser(string):
    """Turn 'YYYY-MM-DD hh:mm:ss' stamps into ISO 'YYYY-MM-DDThh:mm:ss' form.

    Every calendar date followed by a single space gets that space replaced
    by a 'T', which makes the embedded test fixtures parseable by
    ``datetime.fromisoformat`` / ``pd.to_datetime``.
    """
    iso_date = re.compile("([0-9]{4}-[0-9]{2}-[0-9]{2})[ ]", re.VERBOSE)
    return iso_date.sub(r"\1T", string)
class TestGetEod(unittest.TestCase):
def test_idempotent__addtickers(self):
d1 = eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
).add_tickers(["MSFT.US"])
d2 = (
eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
)
.add_tickers(["MSFT.US"])
.add_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_idempotent_truncate_dates(self):
d1 = eod.Fundamental(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
).truncate_dates("2020-10-14", "2020-10-16")
d2 = (
eod.Fundamental(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17")
.truncate_dates("2020-10-14", "2020-10-16")
.truncate_dates("2020-10-14", "2020-10-16")
)
self.assertEqual(d1, d2)
def test_idempotent_remove_tickers(self):
d1 = eod.Fundamental(
["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17"
).remove_tickers(["MSFT.US"])
d2 = (
eod.Fundamental(["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17")
.remove_tickers(["MSFT.US"])
.remove_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_add_remove(self):
d1 = eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
d2 = (
eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
.add_tickers(["MSFT.US"])
.remove_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_remove_all_tickers(self):
with self.assertRaises(Exception):
eod.Ohlcv(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17").remove_tickers(
["AAPL.US"]
).retrieve_data()
def test_misspelled_input(self):
with self.assertRaises(Exception):
eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="Daoly"
)
def test_ohlcv_data_format_hasnt_changed(
self,
):
expected_aapl = pd.read_csv(
StringIO(
"""
Date Open High Low Close Adjusted_close Volume
2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0
2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0
2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0
2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0
275 NaN NaN NaN NaN NaN NaN
"""
),
sep="\\s+",
)
url = "https://eodhistoricaldata.com/api/eod/AAPL.US?api_token={}&from=2020-10-13&to=2020-10-17&period=d".format(
TOKEN
)
actual = pd.read_csv(
url,
usecols=[
"Date",
"Volume",
"Open",
"Close",
"High",
"Low",
"Adjusted_close",
],
)
with patch.object(pd, "read_csv") as mock_read:
mock_read.autospec = True
mock_read.return_value = expected_aapl
expected = pd.read_csv(
url,
usecols=[
"Date",
"Volume",
"Open",
"Close",
"High",
"Low",
"Adjusted_close",
],
)
pd.testing.assert_frame_equal(actual, expected, rtol=5e-3)
def test_index_formatting(self):
expected_aapl = pd.read_csv(
StringIO(
"""
Date Open High Low Close Adjusted_close Volume
2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0
2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0
2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0
2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0
275 NaN NaN NaN NaN NaN NaN
"""
),
sep="\\s+",
)
expected_aapl_formatted = pd.read_csv(
StringIO(
date_parser(
"""
Stock Date Open High Low Close Adjusted_close Volume
AAPL.US 2020-10-13 00:00:00+00:00 125.27 125.390 119.65 121.10 120.7110 262330500.0
AAPL.US 2020-10-14 00:00:00+00:00 121.00 123.030 119.62 121.19 120.8008 151062297.0
AAPL.US 2020-10-15 00:00:00+00:00 118.72 121.200 118.15 120.71 120.3223 112559203.0
AAPL.US 2020-10-16 00:00:00+00:00 121.28 121.548 118.81 119.02 118.6377 115393797.0
"""
)
),
sep="\\s+",
index_col=[0, 1],
converters={"Date": lambda col: datetime.datetime.fromisoformat(col)},
)
with patch.object(pd, "read_csv") as mock_read:
mock_read.autospec = True
mock_read.return_value = expected_aapl
formatted_mock = eod.Ohlcv(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
).retrieve_data()
pd.testing.assert_frame_equal(
formatted_mock, expected_aapl_formatted, rtol=5e-3
)
# Stock Date Gmtoffset Datetime Open High Low Close Volume Returns
# BP.LSE 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# BP.LSE 2020-10-14 00:00:00+00:00 0.0 2020-10-13 15:25:00 213.649993 214.000000 213.550003 213.856994 1210380.0 -0.001601
# BP.LSE 2020-10-15 00:00:00+00:00 0.0 2020-10-14 15:25:00 213.000000 213.149993 212.600006 212.649993 1182246.0 0.019660
# BP.LSE 2020-10-16 00:00:00+00:00 0.0 2020-10-15 15:25:00 207.149993 207.199996 206.500000 206.850006 1626720.0 -0.013826
# AAPL.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# AAPL.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 121.139999 121.279998 121.029998 121.050003 4585723.0 0.003648
# AAPL.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 121.580001 121.709999 121.139999 121.180000 3420583.0 0.015419
# AAPL.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 120.790000 120.849998 120.580001 120.699996 3436603.0 -0.003550
# MSFT.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# MSFT.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 223.320007 223.389999 222.750000 222.830001 1457493.0 0.000651
# MSFT.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 221.199996 221.414993 220.600006 220.759994 1122912.0 0.012377
# MSFT.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 219.639999 219.880004 219.490005 219.660003 1201342.0 -0.003900
# """ ) ), sep="\\s+", index_col=[0,1,2], converters = {'Date' : lambda col: datetime.datetime.fromisoformat( col ) \
if __name__ == "__main__":
unittest.main()
| true | true |
f726daae43a8790a611a80a7e3876da1fd12b7ee | 2,804 | py | Python | var/spack/repos/builtin/packages/r-pmcmrplus/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/r-pmcmrplus/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/r-pmcmrplus/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPmcmrplus(RPackage):
"""Calculate Pairwise Multiple Comparisons of Mean Rank Sums Extended.
For one-way layout experiments the one-way ANOVA can be performed as an
omnibus test. All-pairs multiple comparisons tests (Tukey-Kramer test,
Scheffe test, LSD-test) and many-to-one tests (Dunnett test) for normally
distributed residuals and equal within variance are available. Furthermore,
all-pairs tests (Games-Howell test, Tamhane's T2 test, Dunnett T3 test,
Ury-Wiggins-Hochberg test) and many-to-one (Tamhane-Dunnett Test) for
normally distributed residuals and heterogeneous variances are provided.
Van der Waerden's normal scores test for omnibus, all-pairs and many-to-one
tests is provided for non-normally distributed residuals and homogeneous
variances. The Kruskal-Wallis, BWS and Anderson-Darling omnibus test and
all-pairs tests (Nemenyi test, Dunn test, Conover test,
Dwass-Steele-Critchlow- Fligner test) as well as many-to-one (Nemenyi test,
Dunn test, U-test) are given for the analysis of variance by ranks.
Non-parametric trend tests (Jonckheere test, Cuzick test, Johnson-Mehrotra
test, Spearman test) are included. In addition, a Friedman-test for
one-way ANOVA with repeated measures on ranks (CRBD) and Skillings-Mack
test for unbalanced CRBD is provided with consequent all-pairs tests
(Nemenyi test, Siegel test, Miller test, Conover test, Exact test) and
many-to-one tests (Nemenyi test, Demsar test, Exact test). A trend can be
tested with Pages's test. Durbin's test for a two-way balanced incomplete
block design (BIBD) is given in this package as well as Gore's test for
CRBD with multiple observations per cell is given. Outlier tests, Mandel's
k- and h statistic as well as functions for Type I error and Power analysis
as well as generic summary, print and plot methods are provided."""
cran = "PMCMRplus"
version('1.9.3', sha256='76baba60f57343fa5bb6f6d2ea27aab77178e02b0d2f9d5d74abde7d18994f03')
depends_on('r@3.5.0:', type=('build', 'run'))
depends_on('r-mvtnorm@1.0:', type=('build', 'run'))
depends_on('r-multcompview', type=('build', 'run'))
depends_on('r-gmp', type=('build', 'run'))
depends_on('r-rmpfr', type=('build', 'run'))
depends_on('r-suppdists', type=('build', 'run'))
depends_on('r-ksamples@1.2.7:', type=('build', 'run'))
depends_on('r-bwstest@0.2.1:', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
depends_on('gmp@4.2.3:')
depends_on('mpfr@3.0.0:')
| 53.923077 | 95 | 0.722183 |
from spack import *
class RPmcmrplus(RPackage):
    """Calculate pairwise multiple comparisons of mean rank sums, extended.

    Spack recipe for the CRAN package 'PMCMRplus'.
    """
    # Name of the package on CRAN (Spack derives the download URL from this).
    cran = "PMCMRplus"
    version('1.9.3', sha256='76baba60f57343fa5bb6f6d2ea27aab77178e02b0d2f9d5d74abde7d18994f03')
    # R itself plus CRAN package dependencies, required at build and run time.
    depends_on('r@3.5.0:', type=('build', 'run'))
    depends_on('r-mvtnorm@1.0:', type=('build', 'run'))
    depends_on('r-multcompview', type=('build', 'run'))
    depends_on('r-gmp', type=('build', 'run'))
    depends_on('r-rmpfr', type=('build', 'run'))
    depends_on('r-suppdists', type=('build', 'run'))
    depends_on('r-ksamples@1.2.7:', type=('build', 'run'))
    depends_on('r-bwstest@0.2.1:', type=('build', 'run'))
    depends_on('r-mass', type=('build', 'run'))
    # Native libraries backing the r-gmp / r-rmpfr bindings above.
    depends_on('gmp@4.2.3:')
    depends_on('mpfr@3.0.0:')
| true | true |
f726dce4683e7d5956b6554b0e5f04d2913f0e26 | 4,225 | py | Python | session4/e_animations_2axis.py | Leylasaadi/MACT20.21_Digital_tools_Big_Data_part_2 | 94cafa0581ec36a305867ebfdcb91c787aa77a16 | [
"Apache-2.0"
] | null | null | null | session4/e_animations_2axis.py | Leylasaadi/MACT20.21_Digital_tools_Big_Data_part_2 | 94cafa0581ec36a305867ebfdcb91c787aa77a16 | [
"Apache-2.0"
] | null | null | null | session4/e_animations_2axis.py | Leylasaadi/MACT20.21_Digital_tools_Big_Data_part_2 | 94cafa0581ec36a305867ebfdcb91c787aa77a16 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
##################################################
# This script shows how to create animated plots using matplotlib and a basic dataset
# Multiple tutorials inspired the current design but they mostly came from:
# hhttps://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1
# Note: the project keeps updating every course almost yearly
##################################################
#
##################################################
# Author: Diego Pajarito
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
# We need to import numpy and matplotlib library
# importing libraries
import pandas as pd
import seaborn as sns
# Read files and prepare data
data = pd.read_csv('../data/2021_seguiment-covid19-bcn.csv')
#data = pd.read_csv('https://opendata-ajuntament.barcelona.cat/data/dataset/4f3ffbda-d5be-4f2a-a836-26a77be6df1a/resource/f627ac0a-d05f-416d-9773-eeb464a3fc44/download')
data.columns = ['date_indicator', 'frequency_indicator', 'place', 'name_indicator',
'name_variable', 'value', 'unit', 'source']
# We will use two datasets to generate plots
data_daily = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (diari)']
data_accumulated = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (acumulat)']
# We need the data to be in time format to calculate values in days after day zero
data_daily.loc[:, 'date_indicator'] = pd.to_datetime(data_daily['date_indicator'])
initial_day = data_daily['date_indicator'].min()
data_daily.loc[:, 'day_after_zero'] = data_daily['date_indicator'] - initial_day
data_daily.loc[:, 'day_after_zero'] = data_daily['day_after_zero']/np.timedelta64(1, 'D')
# We need the data to be in time format to calculate values in days after day zero
data_accumulated.loc[:, 'date_indicator'] = pd.to_datetime(data_accumulated['date_indicator'])
data_accumulated.loc[:, 'day_after_zero'] = data_accumulated['date_indicator'] - initial_day
data_accumulated.loc[:, 'day_after_zero'] = data_accumulated['day_after_zero']/np.timedelta64(1, 'D')
# we also extract some values to set the plot limits
max_day = data_daily['day_after_zero'].max().astype(int)
max_cases_daily = data_daily['value'].max()
max_cases_accumulated = data_accumulated['value'].max()
title = 'Barcelona: '
# We then prepare the writer and animation file options
Writer = animation.writers['ffmpeg']
writer = Writer(fps=20, metadata=dict(artist='MaCTResearcher'), bitrate=1800)
# If error using anaconda try to install ffmpeg
# conda install -c conda-forge ffmpeg
# We create an initial plot with basic configuration a single line
fig, ax1 = plt.subplots()
fig.set_size_inches(10, 6)
plt.title(title + 'Covid-19 cases', fontsize=18)
plt.xlabel('Day after case 1', fontsize=14)
plt.ylim(0, max_cases_accumulated)
plt.ylabel('Accumulated', fontsize=18)
# # now we configure the secondary axis
ax2 = ax1.twinx()
plt.ylim(0, max_cases_daily*2)
cases_ticks = np.arange(0, max_day, 50)
# We need to set an animation function to handle individual behaviour per frame
# variable "i" is the frame id that can be used to handle queries or filters for your data
def animate(i):
    """Draw frame ``i``: every observation up to day ``i`` after day zero.

    The accumulated curve is drawn on the primary axis (ax1) as a red line;
    the daily counts go on the secondary axis (ax2) as blue bars.
    """
    daily_so_far = data_daily[data_daily['day_after_zero'] <= i]
    accumulated_so_far = data_accumulated[data_accumulated['day_after_zero'] <= i]
    sns.lineplot(x='day_after_zero', y='value', data=accumulated_so_far, color="r", ax=ax1)
    sns.barplot(x='day_after_zero', y='value', data=daily_so_far, color='b', ax=ax2)
    # Re-apply axis cosmetics on every frame, since FuncAnimation redraws.
    plt.ylabel('Daily', fontsize=18)
    plt.xlim(0, max_day)
    plt.xticks(cases_ticks)
    plt.xlabel('Day after case 1', fontsize=18)
# Handling secondary axis implies different management in the animate function
ani = matplotlib.animation.FuncAnimation(fig, animate, frames=max_day, repeat=True)
ani.save('covid_cases_bcn_2axis.mp4', writer=writer)
print('end')
| 46.428571 | 169 | 0.725444 | true | true | |
f726ddf1c1dac0d3d3a8df65efc42e4d30590ce6 | 9,073 | py | Python | mars/lib/nvutils.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 2,413 | 2018-12-06T09:37:11.000Z | 2022-03-30T15:47:39.000Z | mars/lib/nvutils.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 1,335 | 2018-12-07T03:06:18.000Z | 2022-03-31T11:45:57.000Z | mars/lib/nvutils.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 329 | 2018-12-07T03:12:41.000Z | 2022-03-29T21:49:57.000Z | # -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import uuid
from collections import namedtuple
from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref,\
create_string_buffer, Structure, POINTER, CDLL
logger = logging.getLogger(__name__)
# Some constants taken from cuda.h
CUDA_SUCCESS = 0
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
CU_NO_CUDA_CAPABLE_DEVICE_DETECTED = 100
# nvml constants
NVML_SUCCESS = 0
NVML_TEMPERATURE_GPU = 0
NVML_DRIVER_NOT_LOADED = 9
class _CUuuid_t(Structure):
_fields_ = [
('bytes', c_char * 16)
]
class _nvmlUtilization_t(Structure):
_fields_ = [
('gpu', c_uint),
('memory', c_uint),
]
class _struct_nvmlDevice_t(Structure):
pass # opaque handle
_nvmlDevice_t = POINTER(_struct_nvmlDevice_t)
class _nvmlBAR1Memory_t(Structure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
_is_windows: bool = sys.platform.startswith('win')
def _load_nv_library(*libnames):
for lib in libnames:
try:
return CDLL(lib)
except OSError:
continue
_cuda_lib = _nvml_lib = None
_cu_device_info = namedtuple('_cu_device_info', 'index uuid name multiprocessors cuda_cores threads')
_nvml_driver_info = namedtuple('_nvml_driver_info', 'driver_version cuda_version')
_nvml_device_status = namedtuple(
'_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')
_init_pid = None
_gpu_count = None
_driver_info = None
_device_infos = dict()
_no_device_warned = False
class NVError(Exception):
    """Base error for native CUDA/NVML calls, carrying the driver status code."""
    def __init__(self, msg, *args, errno=None):
        self._errno = errno
        if not msg:
            msg = 'Unknown error'
        super().__init__(msg, *args)
    def __str__(self):
        base = super().__str__()
        return f'({self._errno}) {base}'
    @property
    def errno(self):
        """Numeric status code reported by the driver, or None if unknown."""
        return self._errno
    @property
    def message(self):
        """The message text alone, without the '(errno)' prefix."""
        return super().__str__()
class NVDeviceAPIError(NVError):
pass
class NVMLAPIError(NVError):
pass
def _cu_check_error(result):
    # Raise NVDeviceAPIError, with the driver's human-readable message,
    # for any non-success CUDA driver status code.
    if result != CUDA_SUCCESS:
        _error_str = c_char_p()
        _cuda_lib.cuGetErrorString(result, byref(_error_str))
        raise NVDeviceAPIError(_error_str.value.decode(), errno=result)
# Lazily-resolved handle to nvmlErrorString (restype fixed to c_char_p on
# first use).  Resolution is deferred because _nvml_lib may still be None
# at import time.
_nvmlErrorString = None
def _nvml_check_error(result):
    # Raise NVMLAPIError, with NVML's human-readable message, for any
    # non-success NVML status code.
    global _nvmlErrorString
    if _nvmlErrorString is None:
        _nvmlErrorString = _nvml_lib.nvmlErrorString
        _nvmlErrorString.restype = c_char_p
    if result != NVML_SUCCESS:
        _error_str = _nvmlErrorString(result)
        raise NVMLAPIError(_error_str.decode(), errno=result)
_cu_process_var_to_cores = {
(1, 0): 8,
(1, 1): 8,
(1, 2): 8,
(1, 3): 8,
(2, 0): 32,
(2, 1): 48,
}
def _cu_get_processor_cores(major, minor):
return _cu_process_var_to_cores.get((major, minor), 192)
def _init_cp():
global _cuda_lib, _no_device_warned
if _init_pid == os.getpid():
return
_cuda_lib = _load_nv_library('libcuda.so', 'libcuda.dylib', 'cuda.dll', 'nvcuda.dll')
if _cuda_lib is None:
return
try:
_cu_check_error(_cuda_lib.cuInit(0))
except NVDeviceAPIError as ex:
if ex.errno == CU_NO_CUDA_CAPABLE_DEVICE_DETECTED:
_cuda_lib = None
if not _no_device_warned:
logger.warning('No CUDA device detected')
_no_device_warned = True
else:
logger.exception('Failed to initialize libcuda.')
return
def _init_nvml():
global _nvml_lib, _no_device_warned
if _init_pid == os.getpid():
return
nvml_paths = ['libnvidia-ml.so', 'libnvidia-ml.so.1', 'libnvidia-ml.dylib', 'nvml.dll']
if _is_windows:
nvml_paths.append(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"),
"NVIDIA Corporation/NVSMI/nvml.dll"))
_nvml_lib = _load_nv_library(*nvml_paths)
if _nvml_lib is None:
return
try:
_nvml_check_error(_nvml_lib.nvmlInit_v2())
except NVMLAPIError as ex:
if ex.errno == NVML_DRIVER_NOT_LOADED:
_nvml_lib = None
if not _no_device_warned:
logger.warning('Failed to load libnvidia-ml: %s, no CUDA device will be enabled', ex.message)
_no_device_warned = True
else:
logger.exception('Failed to initialize libnvidia-ml.')
return
def _init():
    # Initialize both the CUDA driver API and NVML.  The owning PID is
    # recorded only when both libraries loaded, so a new process (fork)
    # re-runs initialization (_init_cp/_init_nvml compare against it).
    global _init_pid
    _init_cp()
    _init_nvml()
    if _nvml_lib is not None and _cuda_lib is not None:
        _init_pid = os.getpid()
def get_device_count():
    """Return the number of visible CUDA devices, or None when NVML is unavailable.

    When CUDA_VISIBLE_DEVICES is set, its comma-separated entries are counted
    (an empty value means zero devices); otherwise NVML is queried.  The
    result is cached in the module-level ``_gpu_count``.
    """
    global _gpu_count
    if _gpu_count is not None:
        return _gpu_count
    _init_nvml()
    if _nvml_lib is None:
        return None
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        devices = os.environ['CUDA_VISIBLE_DEVICES'].strip()
        if not devices:
            _gpu_count = 0
        else:
            _gpu_count = len(devices.split(','))
    else:
        n_gpus = c_uint()
        # Fix: nvmlDeviceGetCount returns an NVML status code, so it must be
        # checked with _nvml_check_error.  The previous _cu_check_error would
        # have handed an NVML code to cuGetErrorString on failure.  Success
        # codes agree (CUDA_SUCCESS == NVML_SUCCESS == 0), so the success
        # path is unchanged.
        _nvml_check_error(_nvml_lib.nvmlDeviceGetCount(byref(n_gpus)))
        _gpu_count = n_gpus.value
    return _gpu_count
def get_driver_info():
global _driver_info
_init_nvml()
if _nvml_lib is None:
return None
if _driver_info is not None:
return _driver_info
version_buf = create_string_buffer(100)
cuda_version = c_uint()
_nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(version_buf, len(version_buf)))
_nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_version)))
_driver_info = _nvml_driver_info(
driver_version=version_buf.value.decode(),
cuda_version='.'.join(str(v) for v in divmod(cuda_version.value, 1000))
)
return _driver_info
def get_device_info(dev_index):
try:
return _device_infos[dev_index]
except KeyError:
pass
_init()
if _init_pid is None:
return None
device = c_int()
name_buf = create_string_buffer(100)
uuid_t = _CUuuid_t()
cc_major = c_int()
cc_minor = c_int()
cores = c_int()
threads_per_core = c_int()
_cu_check_error(_cuda_lib.cuDeviceGet(byref(device), c_int(dev_index)))
_cu_check_error(_cuda_lib.cuDeviceGetName(name_buf, len(name_buf), device))
_cu_check_error(_cuda_lib.cuDeviceGetUuid(byref(uuid_t), device))
_cu_check_error(_cuda_lib.cuDeviceComputeCapability(
byref(cc_major), byref(cc_minor), device))
_cu_check_error(_cuda_lib.cuDeviceGetAttribute(
byref(cores), CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device))
_cu_check_error(_cuda_lib.cuDeviceGetAttribute(
byref(threads_per_core), CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device))
if 'CUDA_VISIBLE_DEVICES' in os.environ:
real_dev_index = [int(s) for s in os.environ['CUDA_VISIBLE_DEVICES'].split(',')][dev_index]
else:
real_dev_index = dev_index
info = _device_infos[dev_index] = _cu_device_info(
index=real_dev_index,
uuid=uuid.UUID(bytes=uuid_t.bytes),
name=name_buf.value.decode(),
multiprocessors=cores.value,
cuda_cores=cores.value * _cu_get_processor_cores(cc_major.value, cc_minor.value),
threads=cores.value * threads_per_core.value,
)
return info
def get_device_status(dev_index):
_init()
if _init_pid is None:
return None
device = _nvmlDevice_t()
utils = _nvmlUtilization_t()
temperature = c_uint()
memory_info = _nvmlBAR1Memory_t()
dev_uuid = get_device_info(dev_index).uuid
uuid_str = ('GPU-' + str(dev_uuid)).encode()
_nvml_check_error(_nvml_lib.nvmlDeviceGetHandleByUUID(uuid_str, byref(device)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetUtilizationRates(device, byref(utils)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetTemperature(
device, NVML_TEMPERATURE_GPU, byref(temperature)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetBAR1MemoryInfo(device, byref(memory_info)))
return _nvml_device_status(
gpu_util=utils.gpu,
mem_util=utils.memory,
temperature=temperature.value,
fb_total_mem=memory_info.total,
fb_free_mem=memory_info.free,
fb_used_mem=memory_info.used,
)
| 27.831288 | 109 | 0.689518 |
import logging
import os
import sys
import uuid
from collections import namedtuple
from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref,\
create_string_buffer, Structure, POINTER, CDLL
logger = logging.getLogger(__name__)
CUDA_SUCCESS = 0
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
CU_NO_CUDA_CAPABLE_DEVICE_DETECTED = 100
NVML_SUCCESS = 0
NVML_TEMPERATURE_GPU = 0
NVML_DRIVER_NOT_LOADED = 9
class _CUuuid_t(Structure):
_fields_ = [
('bytes', c_char * 16)
]
class _nvmlUtilization_t(Structure):
_fields_ = [
('gpu', c_uint),
('memory', c_uint),
]
class _struct_nvmlDevice_t(Structure):
pass
_nvmlDevice_t = POINTER(_struct_nvmlDevice_t)
class _nvmlBAR1Memory_t(Structure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
_is_windows: bool = sys.platform.startswith('win')
def _load_nv_library(*libnames):
for lib in libnames:
try:
return CDLL(lib)
except OSError:
continue
_cuda_lib = _nvml_lib = None
_cu_device_info = namedtuple('_cu_device_info', 'index uuid name multiprocessors cuda_cores threads')
_nvml_driver_info = namedtuple('_nvml_driver_info', 'driver_version cuda_version')
_nvml_device_status = namedtuple(
'_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')
_init_pid = None
_gpu_count = None
_driver_info = None
_device_infos = dict()
_no_device_warned = False
class NVError(Exception):
def __init__(self, msg, *args, errno=None):
self._errno = errno
super().__init__(msg or 'Unknown error', *args)
def __str__(self):
return f'({self._errno}) {super().__str__()}'
@property
def errno(self):
return self._errno
@property
def message(self):
return super().__str__()
class NVDeviceAPIError(NVError):
pass
class NVMLAPIError(NVError):
pass
def _cu_check_error(result):
if result != CUDA_SUCCESS:
_error_str = c_char_p()
_cuda_lib.cuGetErrorString(result, byref(_error_str))
raise NVDeviceAPIError(_error_str.value.decode(), errno=result)
_nvmlErrorString = None
def _nvml_check_error(result):
global _nvmlErrorString
if _nvmlErrorString is None:
_nvmlErrorString = _nvml_lib.nvmlErrorString
_nvmlErrorString.restype = c_char_p
if result != NVML_SUCCESS:
_error_str = _nvmlErrorString(result)
raise NVMLAPIError(_error_str.decode(), errno=result)
_cu_process_var_to_cores = {
(1, 0): 8,
(1, 1): 8,
(1, 2): 8,
(1, 3): 8,
(2, 0): 32,
(2, 1): 48,
}
def _cu_get_processor_cores(major, minor):
return _cu_process_var_to_cores.get((major, minor), 192)
def _init_cp():
global _cuda_lib, _no_device_warned
if _init_pid == os.getpid():
return
_cuda_lib = _load_nv_library('libcuda.so', 'libcuda.dylib', 'cuda.dll', 'nvcuda.dll')
if _cuda_lib is None:
return
try:
_cu_check_error(_cuda_lib.cuInit(0))
except NVDeviceAPIError as ex:
if ex.errno == CU_NO_CUDA_CAPABLE_DEVICE_DETECTED:
_cuda_lib = None
if not _no_device_warned:
logger.warning('No CUDA device detected')
_no_device_warned = True
else:
logger.exception('Failed to initialize libcuda.')
return
def _init_nvml():
global _nvml_lib, _no_device_warned
if _init_pid == os.getpid():
return
nvml_paths = ['libnvidia-ml.so', 'libnvidia-ml.so.1', 'libnvidia-ml.dylib', 'nvml.dll']
if _is_windows:
nvml_paths.append(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"),
"NVIDIA Corporation/NVSMI/nvml.dll"))
_nvml_lib = _load_nv_library(*nvml_paths)
if _nvml_lib is None:
return
try:
_nvml_check_error(_nvml_lib.nvmlInit_v2())
except NVMLAPIError as ex:
if ex.errno == NVML_DRIVER_NOT_LOADED:
_nvml_lib = None
if not _no_device_warned:
logger.warning('Failed to load libnvidia-ml: %s, no CUDA device will be enabled', ex.message)
_no_device_warned = True
else:
logger.exception('Failed to initialize libnvidia-ml.')
return
def _init():
global _init_pid
_init_cp()
_init_nvml()
if _nvml_lib is not None and _cuda_lib is not None:
_init_pid = os.getpid()
def get_device_count():
global _gpu_count
if _gpu_count is not None:
return _gpu_count
_init_nvml()
if _nvml_lib is None:
return None
if 'CUDA_VISIBLE_DEVICES' in os.environ:
devices = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if not devices:
_gpu_count = 0
else:
_gpu_count = len(devices.split(','))
else:
n_gpus = c_uint()
_cu_check_error(_nvml_lib.nvmlDeviceGetCount(byref(n_gpus)))
_gpu_count = n_gpus.value
return _gpu_count
def get_driver_info():
global _driver_info
_init_nvml()
if _nvml_lib is None:
return None
if _driver_info is not None:
return _driver_info
version_buf = create_string_buffer(100)
cuda_version = c_uint()
_nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(version_buf, len(version_buf)))
_nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_version)))
_driver_info = _nvml_driver_info(
driver_version=version_buf.value.decode(),
cuda_version='.'.join(str(v) for v in divmod(cuda_version.value, 1000))
)
return _driver_info
def get_device_info(dev_index):
try:
return _device_infos[dev_index]
except KeyError:
pass
_init()
if _init_pid is None:
return None
device = c_int()
name_buf = create_string_buffer(100)
uuid_t = _CUuuid_t()
cc_major = c_int()
cc_minor = c_int()
cores = c_int()
threads_per_core = c_int()
_cu_check_error(_cuda_lib.cuDeviceGet(byref(device), c_int(dev_index)))
_cu_check_error(_cuda_lib.cuDeviceGetName(name_buf, len(name_buf), device))
_cu_check_error(_cuda_lib.cuDeviceGetUuid(byref(uuid_t), device))
_cu_check_error(_cuda_lib.cuDeviceComputeCapability(
byref(cc_major), byref(cc_minor), device))
_cu_check_error(_cuda_lib.cuDeviceGetAttribute(
byref(cores), CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device))
_cu_check_error(_cuda_lib.cuDeviceGetAttribute(
byref(threads_per_core), CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device))
if 'CUDA_VISIBLE_DEVICES' in os.environ:
real_dev_index = [int(s) for s in os.environ['CUDA_VISIBLE_DEVICES'].split(',')][dev_index]
else:
real_dev_index = dev_index
info = _device_infos[dev_index] = _cu_device_info(
index=real_dev_index,
uuid=uuid.UUID(bytes=uuid_t.bytes),
name=name_buf.value.decode(),
multiprocessors=cores.value,
cuda_cores=cores.value * _cu_get_processor_cores(cc_major.value, cc_minor.value),
threads=cores.value * threads_per_core.value,
)
return info
def get_device_status(dev_index):
_init()
if _init_pid is None:
return None
device = _nvmlDevice_t()
utils = _nvmlUtilization_t()
temperature = c_uint()
memory_info = _nvmlBAR1Memory_t()
dev_uuid = get_device_info(dev_index).uuid
uuid_str = ('GPU-' + str(dev_uuid)).encode()
_nvml_check_error(_nvml_lib.nvmlDeviceGetHandleByUUID(uuid_str, byref(device)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetUtilizationRates(device, byref(utils)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetTemperature(
device, NVML_TEMPERATURE_GPU, byref(temperature)))
_nvml_check_error(_nvml_lib.nvmlDeviceGetBAR1MemoryInfo(device, byref(memory_info)))
return _nvml_device_status(
gpu_util=utils.gpu,
mem_util=utils.memory,
temperature=temperature.value,
fb_total_mem=memory_info.total,
fb_free_mem=memory_info.free,
fb_used_mem=memory_info.used,
)
| true | true |
f726de42bea9102ed23d3fe9ef9fa07cf1e1fe0c | 595 | py | Python | dbaas/account/admin/__init__.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | null | null | null | dbaas/account/admin/__init__.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | null | null | null | dbaas/account/admin/__init__.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from django.contrib.auth.models import User, Group
from ..models import Team, Role, AccountUser, Organization
from .user import CustomUserAdmin
from .role import RoleAdmin
from .team import TeamAdmin
from .organization import OrganizationAdmin
# Swap Django's stock auth admin for this project's account models: the
# default User and Group entries are removed (Group gets no replacement
# here), and the project's AccountUser is managed via CustomUserAdmin.
admin.site.unregister(User)
admin.site.unregister(Group)
admin.site.register(AccountUser, CustomUserAdmin)
admin.site.register(Role, RoleAdmin)
admin.site.register(Team, TeamAdmin)
admin.site.register(Organization, OrganizationAdmin)
| 31.315789 | 58 | 0.820168 |
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from django.contrib.auth.models import User, Group
from ..models import Team, Role, AccountUser, Organization
from .user import CustomUserAdmin
from .role import RoleAdmin
from .team import TeamAdmin
from .organization import OrganizationAdmin
admin.site.unregister(User)
admin.site.unregister(Group)
admin.site.register(AccountUser, CustomUserAdmin)
admin.site.register(Role, RoleAdmin)
admin.site.register(Team, TeamAdmin)
admin.site.register(Organization, OrganizationAdmin)
| true | true |
f726dea29d24103ee493a83474a24f027af1befb | 11,256 | py | Python | theano/gof/tests/test_destroyhandler.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | null | null | null | theano/gof/tests/test_destroyhandler.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | null | null | null | theano/gof/tests/test_destroyhandler.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from six.moves import xrange
from theano.gof.type import Type
from theano.gof import graph
from theano.gof.graph import Variable, Apply
from theano.gof.op import Op
from theano.gof.opt import * # noqa
from theano.gof import destroyhandler
from theano.gof.fg import FunctionGraph, InconsistencyError
from theano.gof.toolbox import ReplaceValidate
from copy import copy
def PatternOptimizer(p1, p2, ign=True):
    """Shorthand: build an optimizer that rewrites pattern ``p1`` into ``p2``."""
    return OpKeyOptimizer(PatternSub(p1, p2), ignore_newtrees=ign)
def OpSubOptimizer(op1, op2, fail=NavigatorOptimizer.warn_ignore, ign=True):
    """Shorthand: build an optimizer that substitutes every ``op1`` node with ``op2``.

    ``fail`` is invoked on each substitution that raises (e.g. an
    InconsistencyError from the destroy handler).
    """
    return TopoOptimizer(OpSub(op1, op2),
                         ignore_newtrees=ign, failure_callback=fail)
def as_variable(x):
assert isinstance(x, Variable)
return x
class MyType(Type):
    """Trivial graph Type for tests: accepts any data, all instances equal."""

    def filter(self, data):
        # No validation or conversion: every value is accepted as-is.
        return data

    def __eq__(self, other):
        # Every MyType instance is interchangeable with every other.
        return isinstance(other, MyType)

    def __hash__(self):
        # Defining __eq__ without __hash__ sets __hash__ to None on Python 3,
        # making MyType unhashable (types can end up as dict keys / set
        # members in graph machinery).  Hash by class so that equal
        # instances hash alike, keeping the __eq__/__hash__ contract.
        return hash(type(self))
def MyVariable(name):
    """Create a fresh Variable of MyType with the given name (no owner, no index)."""
    return Variable(MyType(), None, None, name=name)
def MyConstant(data):
    """Wrap ``data`` in a graph Constant of MyType."""
    return graph.Constant(MyType(), data=data)
class MyOp(Op):
    """Configurable test Op with optional view/destroy maps.

    Used to exercise the DestroyHandler: ``vmap`` marks outputs that are
    views of inputs, ``dmap`` marks outputs that destroy inputs.
    """

    def __init__(self, nin, name, vmap=None, dmap=None, nout=1,
                 destroyhandler_tolerate_same=None,
                 destroyhandler_tolerate_aliased=None):
        # nin/nout: arity of the op; name: used for __str__ and output names.
        # vmap/dmap: Theano-style view_map/destroy_map dicts
        #   {output_index: [input_indices]}.
        # destroyhandler_tolerate_*: lists of (input, input) index pairs the
        #   destroy handler should not complain about when identical/aliased.
        if vmap is None:
            vmap = {}
        if dmap is None:
            dmap = {}
        if destroyhandler_tolerate_same is None:
            destroyhandler_tolerate_same = []
        if destroyhandler_tolerate_aliased is None:
            destroyhandler_tolerate_aliased = []
        self.nin = nin
        self.nout = nout
        self.name = name
        self.destroy_map = dmap
        self.view_map = vmap
        self.destroyhandler_tolerate_same = destroyhandler_tolerate_same
        self.destroyhandler_tolerate_aliased = destroyhandler_tolerate_aliased

    def make_node(self, *inputs):
        """Build an Apply node; all inputs must be MyType variables."""
        assert len(inputs) == self.nin
        inputs = list(map(as_variable, inputs))
        for input in inputs:
            if not isinstance(input.type, MyType):
                raise Exception("Error 1")
        # Output names look like "<op name>_R" so graphs print readably.
        outputs = [MyVariable(self.name + "_R") for i in xrange(self.nout)]
        return Apply(self, inputs, outputs)

    def __str__(self):
        return self.name
sigmoid = MyOp(1, 'Sigmoid')
transpose_view = MyOp(1, 'TransposeView', vmap={0: [0]})
add = MyOp(2, 'Add')
add_in_place = MyOp(2, 'AddInPlace', dmap={0: [0]})
add_in_place_2 = MyOp(2, 'AddInPlace', dmap={0: [0]},
destroyhandler_tolerate_same=[(0, 1)])
add_in_place_3 = MyOp(2, 'AddInPlace', dmap={0: [0]},
destroyhandler_tolerate_aliased=[(0, 1)])
dot = MyOp(2, 'Dot')
def inputs():
    """Return three fresh MyType variables named 'x', 'y' and 'z'."""
    return tuple(MyVariable(label) for label in 'xyz')
def Env(inputs, outputs, validate=True):
    """Build a FunctionGraph wired up with destroy handling.

    Attaches a DestroyHandler and ReplaceValidate feature; when ``validate``
    is True the graph is validated immediately (raising InconsistencyError
    for illegal destroy patterns), otherwise validation is deferred so tests
    can inspect an inconsistent graph.
    """
    e = FunctionGraph(inputs, outputs, clone=False)
    e.attach_feature(destroyhandler.DestroyHandler())
    e.attach_feature(ReplaceValidate())
    if validate:
        e.validate()
    return e
class FailureWatch:
    """Callback that tallies optimization failures.

    Pass an instance as the ``failure_callback`` of OpSubOptimizer or
    PatternOptimizer; every reported InconsistencyError increments
    ``failures`` by one.
    """

    def __init__(self):
        self.failures = 0

    def __call__(self, exc, nav, pairs, lopt, node):
        assert isinstance(exc, InconsistencyError)
        self.failures = self.failures + 1
def consistent(g):
    """Assert that graph ``g`` reports itself consistent, with a readable failure."""
    if not g.consistent():
        print("Test failed! The graph was marked as NOT consistent.")
        raise AssertionError("graph is not consistent")
def inconsistent(g):
    """Assert that graph ``g`` reports itself NOT consistent, with a readable failure."""
    if g.consistent():
        print("Test failed! The graph was marked as consistent.")
        raise AssertionError("graph is unexpectedly consistent")
#################
# Test protocol #
#################
def test_misc():
    # End-to-end smoke test: collapse a chain of transpose views via a
    # pattern optimizer, then exercise replace/replace_validate and
    # checkpoint/revert around an inconsistent in-place rewrite.
    x, y, z = inputs()
    e = transpose_view(transpose_view(transpose_view(transpose_view(x))))
    g = Env([x, y, z], [e])
    consistent(g)
    chk = g.checkpoint()
    PatternOptimizer((transpose_view, (transpose_view, 'x')), 'x').optimize(g)
    assert str(g) == "[x]"
    new_e = add(x, y)
    g.replace_validate(x, new_e)
    assert str(g) == "[Add(x, y)]"
    g.replace(new_e, dot(add_in_place(x, y), transpose_view(x)))
    assert str(g) == "[Dot(AddInPlace(x, y), TransposeView(x))]"
    # AddInPlace destroys x while TransposeView still aliases it.
    inconsistent(g)
    g.revert(chk)
    consistent(g)
    assert str(g) == "[TransposeView(TransposeView(TransposeView(TransposeView(x))))]"
######################
# Test protocol skip #
######################
def test_aliased_inputs_replacement():
x, y, z = inputs()
tv = transpose_view(x)
tvv = transpose_view(tv)
sx = sigmoid(x)
e = add_in_place(x, tv)
g = Env([x, y], [e], False)
inconsistent(g)
g.replace(tv, sx)
consistent(g)
g.replace(sx, tv)
inconsistent(g)
g.replace(tv, tvv)
inconsistent(g)
g.replace(tv, sx)
consistent(g)
def test_indestructible():
    # A variable tagged indestructible may not be destroyed in place;
    # replacing the in-place op with a pure one restores consistency.
    x, y, z = inputs()
    x.tag.indestructible = True
    x = copy(x)
    # checking if indestructible survives the copy!
    assert x.tag.indestructible
    e = add_in_place(x, y)
    g = Env([x, y, z], [e], False)
    inconsistent(g)
    g.replace_validate(e, add(x, y))
    consistent(g)
def test_usage_loop_through_views_2():
x, y, z = inputs()
e0 = transpose_view(transpose_view(sigmoid(x)))
e = dot(add_in_place(x, y), transpose_view(e0))
g = Env([x, y, z], [e])
consistent(g) # because sigmoid can do the copy
g.replace(e0, x)
inconsistent(g) # we cut off the path to the sigmoid
def test_destroyers_loop():
# AddInPlace(x, y) and AddInPlace(y, x) should not coexist
x, y, z = inputs()
e1 = add(x, y)
e2 = add(y, x)
g = Env([x, y, z], [e1, e2])
chk = g.checkpoint()
consistent(g)
g.replace_validate(e1, add_in_place(x, y))
consistent(g)
try:
g.replace_validate(e2, add_in_place(y, x))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
consistent(g)
g.revert(chk)
g.replace_validate(e2, add_in_place(y, x))
consistent(g)
try:
g.replace_validate(e1, add_in_place(x, y))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
consistent(g)
########
# Misc #
########
def test_aliased_inputs():
x, y, z = inputs()
e = add_in_place(x, x)
g = Env([x], [e], False)
inconsistent(g)
def test_aliased_inputs2():
x, y, z = inputs()
e = add_in_place(x, transpose_view(x))
g = Env([x], [e], False)
inconsistent(g)
def test_aliased_inputs_tolerate():
x, y, z = inputs()
e = add_in_place_2(x, x)
g = Env([x], [e], False)
consistent(g)
def test_aliased_inputs_tolerate2():
x, y, z = inputs()
e = add_in_place_2(x, transpose_view(x))
g = Env([x], [e], False)
inconsistent(g)
def test_same_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, x)
g = Env([x], [e], False)
consistent(g)
def test_different_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, transpose_view(x))
g = Env([x], [e], False)
consistent(g)
# warning - don't run this because it would produce the wrong answer
# add_in_place_3 is actually not correct when aliasing of inputs
# is ignored.
def test_indestructible_through_views():
x, y, z = inputs()
x.tag.indestructible = True
tv = transpose_view(x)
e = add_in_place(tv, y)
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(tv, sigmoid(x))
consistent(g)
def test_indirect():
x, y, z = inputs()
e0 = add_in_place(x, y)
e = dot(sigmoid(e0), transpose_view(x))
g = Env([x, y, z], [e], False)
inconsistent(g)
new_e0 = add(x, y)
g.replace(e0, new_e0)
consistent(g)
g.replace(new_e0, add_in_place(x, y))
inconsistent(g)
def test_indirect_2():
x, y, z = inputs()
e0 = transpose_view(x)
e = dot(sigmoid(add_in_place(x, y)), e0)
g = Env([x, y, z], [e], False)
inconsistent(g)
new_e0 = add(e0, y)
g.replace(e0, new_e0)
consistent(g)
def test_long_destroyers_loop():
x, y, z = inputs()
e = dot(dot(add_in_place(x, y),
add_in_place(y, z)),
add(z, x))
g = Env([x, y, z], [e])
consistent(g)
OpSubOptimizer(add, add_in_place).optimize(g)
consistent(g)
# we don't want to see that!
assert str(g) != "[Dot(Dot(AddInPlace(x, y), AddInPlace(y, z)), AddInPlace(z, x))]"
e2 = dot(dot(add_in_place(x, y),
add_in_place(y, z)),
add_in_place(z, x))
try:
Env(*graph.clone([x, y, z], [e2]))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
def test_misc_2():
x, y, z = inputs()
tv = transpose_view(x)
e = add_in_place(x, tv)
g = Env([x, y], [e], False)
inconsistent(g)
g.replace(tv, x)
inconsistent(g)
def test_multi_destroyers():
    # Two separate in-place destroyers of the same input must be rejected
    # at graph-construction (validation) time.
    x, y, z = inputs()
    e = add(add_in_place(x, y), add_in_place(x, y))
    try:
        Env([x, y, z], [e])
        raise Exception("Shouldn't have reached this point.")
    except InconsistencyError:
        # Dropped the unused ``as e`` binding: it shadowed the graph output
        # ``e`` above and, on Python 3, would also delete ``e`` on exit.
        pass
def test_multi_destroyers_through_views():
x, y, z = inputs()
e = dot(add(transpose_view(z), y), add(z, x))
g = Env([x, y, z], [e])
consistent(g)
fail = FailureWatch()
OpSubOptimizer(add, add_in_place, fail).optimize(g)
consistent(g)
assert fail.failures == 1 # should have succeeded once and failed once
def test_repair_destroy_path():
x, y, z = inputs()
e1 = transpose_view(transpose_view(x))
e2 = transpose_view(transpose_view(e1))
e3 = add_in_place(e2, y)
e4 = add_in_place(e1, z)
g = Env([x, y, z], [e3, e4], False)
inconsistent(g)
g.replace(e2, transpose_view(x))
inconsistent(g)
def test_usage_loop():
x, y, z = inputs()
g = Env([x, y, z], [dot(add_in_place(x, z), x)], False)
inconsistent(g)
# replace add_in_place with add
OpSubOptimizer(add_in_place, add).optimize(g)
consistent(g)
def test_usage_loop_through_views():
x, y, z = inputs()
aip = add_in_place(x, y)
e = dot(aip, transpose_view(x))
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(aip, add(x, z))
consistent(g)
def test_usage_loop_insert_views():
x, y, z = inputs()
e = dot(add_in_place(x, add(y, z)),
sigmoid(sigmoid(sigmoid(sigmoid(sigmoid(x))))))
g = Env([x, y, z], [e])
consistent(g)
fail = FailureWatch()
OpSubOptimizer(sigmoid, transpose_view, fail).optimize(g)
consistent(g)
# it must keep one sigmoid in the long sigmoid chain
assert fail.failures == 1
def test_value_repl():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
consistent(g)
g.replace(sy, MyConstant("abc"))
consistent(g)
def test_value_repl_2():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
consistent(g)
g.replace(sy, transpose_view(MyConstant("abc")))
consistent(g)
| 25.875862 | 87 | 0.613184 | from __future__ import print_function
from six.moves import xrange
from theano.gof.type import Type
from theano.gof import graph
from theano.gof.graph import Variable, Apply
from theano.gof.op import Op
from theano.gof.opt import *
from theano.gof import destroyhandler
from theano.gof.fg import FunctionGraph, InconsistencyError
from theano.gof.toolbox import ReplaceValidate
from copy import copy
def PatternOptimizer(p1, p2, ign=True):
return OpKeyOptimizer(PatternSub(p1, p2), ignore_newtrees=ign)
def OpSubOptimizer(op1, op2, fail=NavigatorOptimizer.warn_ignore, ign=True):
return TopoOptimizer(OpSub(op1, op2),
ignore_newtrees=ign, failure_callback=fail)
def as_variable(x):
assert isinstance(x, Variable)
return x
class MyType(Type):
def filter(self, data):
return data
def __eq__(self, other):
return isinstance(other, MyType)
def MyVariable(name):
return Variable(MyType(), None, None, name=name)
def MyConstant(data):
return graph.Constant(MyType(), data=data)
class MyOp(Op):
def __init__(self, nin, name, vmap=None, dmap=None, nout=1,
destroyhandler_tolerate_same=None,
destroyhandler_tolerate_aliased=None):
if vmap is None:
vmap = {}
if dmap is None:
dmap = {}
if destroyhandler_tolerate_same is None:
destroyhandler_tolerate_same = []
if destroyhandler_tolerate_aliased is None:
destroyhandler_tolerate_aliased = []
self.nin = nin
self.nout = nout
self.name = name
self.destroy_map = dmap
self.view_map = vmap
self.destroyhandler_tolerate_same = destroyhandler_tolerate_same
self.destroyhandler_tolerate_aliased = destroyhandler_tolerate_aliased
def make_node(self, *inputs):
assert len(inputs) == self.nin
inputs = list(map(as_variable, inputs))
for input in inputs:
if not isinstance(input.type, MyType):
raise Exception("Error 1")
outputs = [MyVariable(self.name + "_R") for i in xrange(self.nout)]
return Apply(self, inputs, outputs)
def __str__(self):
return self.name
sigmoid = MyOp(1, 'Sigmoid')
transpose_view = MyOp(1, 'TransposeView', vmap={0: [0]})
add = MyOp(2, 'Add')
add_in_place = MyOp(2, 'AddInPlace', dmap={0: [0]})
add_in_place_2 = MyOp(2, 'AddInPlace', dmap={0: [0]},
destroyhandler_tolerate_same=[(0, 1)])
add_in_place_3 = MyOp(2, 'AddInPlace', dmap={0: [0]},
destroyhandler_tolerate_aliased=[(0, 1)])
dot = MyOp(2, 'Dot')
def inputs():
x = MyVariable('x')
y = MyVariable('y')
z = MyVariable('z')
return x, y, z
def Env(inputs, outputs, validate=True):
e = FunctionGraph(inputs, outputs, clone=False)
e.attach_feature(destroyhandler.DestroyHandler())
e.attach_feature(ReplaceValidate())
if validate:
e.validate()
return e
class FailureWatch:
def __init__(self):
self.failures = 0
def __call__(self, exc, nav, pairs, lopt, node):
assert isinstance(exc, InconsistencyError)
self.failures += 1
def consistent(g):
try:
assert g.consistent()
except AssertionError:
print("Test failed! The graph was marked as NOT consistent.")
raise
def inconsistent(g):
try:
assert not g.consistent()
except AssertionError:
print("Test failed! The graph was marked as consistent.")
raise
assert str(g) == "[x]"
new_e = add(x, y)
g.replace_validate(x, new_e)
assert str(g) == "[Add(x, y)]"
g.replace(new_e, dot(add_in_place(x, y), transpose_view(x)))
assert str(g) == "[Dot(AddInPlace(x, y), TransposeView(x))]"
inconsistent(g)
g.revert(chk)
consistent(g)
assert str(g) == "[TransposeView(TransposeView(TransposeView(TransposeView(x))))]"
e = True
x = copy(x)
assert x.tag.indestructible
e = add_in_place(x, y)
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(e, add(x, y))
consistent(g)
def test_usage_loop_through_views_2():
x, y, z = inputs()
e0 = transpose_view(transpose_view(sigmoid(x)))
e = dot(add_in_place(x, y), transpose_view(e0))
g = Env([x, y, z], [e])
consistent(g)
g.replace(e0, x)
inconsistent(g)
def test_destroyers_loop():
x, y, z = inputs()
e1 = add(x, y)
e2 = add(y, x)
g = Env([x, y, z], [e1, e2])
chk = g.checkpoint()
consistent(g)
g.replace_validate(e1, add_in_place(x, y))
consistent(g)
try:
g.replace_validate(e2, add_in_place(y, x))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
consistent(g)
g.revert(chk)
g.replace_validate(e2, add_in_place(y, x))
consistent(g)
try:
g.replace_validate(e1, add_in_place(x, y))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
consistent(g)
e = add_in_place(x, x)
g = Env([x], [e], False)
inconsistent(g)
def test_aliased_inputs2():
x, y, z = inputs()
e = add_in_place(x, transpose_view(x))
g = Env([x], [e], False)
inconsistent(g)
def test_aliased_inputs_tolerate():
x, y, z = inputs()
e = add_in_place_2(x, x)
g = Env([x], [e], False)
consistent(g)
def test_aliased_inputs_tolerate2():
x, y, z = inputs()
e = add_in_place_2(x, transpose_view(x))
g = Env([x], [e], False)
inconsistent(g)
def test_same_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, x)
g = Env([x], [e], False)
consistent(g)
def test_different_aliased_inputs_ignored():
x, y, z = inputs()
e = add_in_place_3(x, transpose_view(x))
g = Env([x], [e], False)
consistent(g)
# add_in_place_3 is actually not correct when aliasing of inputs
# is ignored.
def test_indestructible_through_views():
x, y, z = inputs()
x.tag.indestructible = True
tv = transpose_view(x)
e = add_in_place(tv, y)
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(tv, sigmoid(x))
consistent(g)
def test_indirect():
x, y, z = inputs()
e0 = add_in_place(x, y)
e = dot(sigmoid(e0), transpose_view(x))
g = Env([x, y, z], [e], False)
inconsistent(g)
new_e0 = add(x, y)
g.replace(e0, new_e0)
consistent(g)
g.replace(new_e0, add_in_place(x, y))
inconsistent(g)
def test_indirect_2():
x, y, z = inputs()
e0 = transpose_view(x)
e = dot(sigmoid(add_in_place(x, y)), e0)
g = Env([x, y, z], [e], False)
inconsistent(g)
new_e0 = add(e0, y)
g.replace(e0, new_e0)
consistent(g)
def test_long_destroyers_loop():
x, y, z = inputs()
e = dot(dot(add_in_place(x, y),
add_in_place(y, z)),
add(z, x))
g = Env([x, y, z], [e])
consistent(g)
OpSubOptimizer(add, add_in_place).optimize(g)
consistent(g)
# we don't want to see that!
assert str(g) != "[Dot(Dot(AddInPlace(x, y), AddInPlace(y, z)), AddInPlace(z, x))]"
e2 = dot(dot(add_in_place(x, y),
add_in_place(y, z)),
add_in_place(z, x))
try:
Env(*graph.clone([x, y, z], [e2]))
raise Exception("Shouldn't have reached this point.")
except InconsistencyError:
pass
def test_misc_2():
x, y, z = inputs()
tv = transpose_view(x)
e = add_in_place(x, tv)
g = Env([x, y], [e], False)
inconsistent(g)
g.replace(tv, x)
inconsistent(g)
def test_multi_destroyers():
x, y, z = inputs()
e = add(add_in_place(x, y), add_in_place(x, y))
try:
Env([x, y, z], [e])
raise Exception("Shouldn't have reached this point.")
except InconsistencyError as e:
pass
def test_multi_destroyers_through_views():
x, y, z = inputs()
e = dot(add(transpose_view(z), y), add(z, x))
g = Env([x, y, z], [e])
consistent(g)
fail = FailureWatch()
OpSubOptimizer(add, add_in_place, fail).optimize(g)
consistent(g)
assert fail.failures == 1
def test_repair_destroy_path():
x, y, z = inputs()
e1 = transpose_view(transpose_view(x))
e2 = transpose_view(transpose_view(e1))
e3 = add_in_place(e2, y)
e4 = add_in_place(e1, z)
g = Env([x, y, z], [e3, e4], False)
inconsistent(g)
g.replace(e2, transpose_view(x))
inconsistent(g)
def test_usage_loop():
x, y, z = inputs()
g = Env([x, y, z], [dot(add_in_place(x, z), x)], False)
inconsistent(g)
OpSubOptimizer(add_in_place, add).optimize(g)
consistent(g)
def test_usage_loop_through_views():
x, y, z = inputs()
aip = add_in_place(x, y)
e = dot(aip, transpose_view(x))
g = Env([x, y, z], [e], False)
inconsistent(g)
g.replace_validate(aip, add(x, z))
consistent(g)
def test_usage_loop_insert_views():
x, y, z = inputs()
e = dot(add_in_place(x, add(y, z)),
sigmoid(sigmoid(sigmoid(sigmoid(sigmoid(x))))))
g = Env([x, y, z], [e])
consistent(g)
fail = FailureWatch()
OpSubOptimizer(sigmoid, transpose_view, fail).optimize(g)
consistent(g)
assert fail.failures == 1
def test_value_repl():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
consistent(g)
g.replace(sy, MyConstant("abc"))
consistent(g)
def test_value_repl_2():
x, y, z = inputs()
sy = sigmoid(y)
e = add_in_place(x, sy)
g = Env([x, y], [e], False)
consistent(g)
g.replace(sy, transpose_view(MyConstant("abc")))
consistent(g)
| true | true |
f726ded0f21d12ce2859ff426b0a1110e948ea9e | 3,510 | py | Python | inlp/tag/ltp.py | IgowWang/IgorNLP | 3d1bd119bed19f386f30ca1ad4bad98f4200661a | [
"Apache-2.0"
] | 2 | 2016-02-26T09:13:58.000Z | 2017-01-28T13:15:19.000Z | inlp/tag/ltp.py | IgowWang/IgorNLP | 3d1bd119bed19f386f30ca1ad4bad98f4200661a | [
"Apache-2.0"
] | null | null | null | inlp/tag/ltp.py | IgowWang/IgorNLP | 3d1bd119bed19f386f30ca1ad4bad98f4200661a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# IgorNLP:ltp 词性标注模块
#
# Author: Igor
import os
import tempfile
from subprocess import PIPE
from nltk.internals import overridden, compat
from inlp.tag.api import TaggerI
from inlp.utils import ltp_cmd
class LtpPosTagger(TaggerI):
    '''
    Part-of-speech tagger backed by the LTP (HIT Language Technology Platform)
    command-line tools.

    Example:
    sentences = [['这', '是', '哈工大', '分词器', '。'], ['哈工大', '的', '分词器', '测试']]
    path_ltp = '/home/igor/PycharmProjects/ltp'
    ltpTagger = LtpPosTagger(path_to_ltp=path_ltp)
    print(ltpTagger.tag_sents(sentences))
    print(ltpTagger.tag(['这', '是', '哈工大', '分词器', '。']))
    output:
    [[('这', 'r'), ('是', 'v'), ('哈工大', 'j'), ('分词器', 'n'), ('。', 'wp')], [('哈工大', 'j'), ('的', 'u'), ('分词器', 'n'), ('测试', 'v')]]
    [('这', 'r'), ('是', 'v'), ('哈工大', 'j'), ('分词器', 'n'), ('。', 'wp')]
    '''

    def __init__(self, path_to_ltp, path_to_model=None, path_to_lexicon=None, threads=1,
                 encoding='utf8'):
        '''
        Initialize the tagger by pointing it at a local LTP installation.

        :param path_to_ltp: root directory of the LTP project
        :param path_to_model: LTP part-of-speech model (defaults to
            ``ltp_data/pos.model`` under ``path_to_ltp``)
        :param path_to_lexicon: optional user-supplied lexicon file
        :param threads: number of worker threads for the LTP binary
        :param encoding: text encoding used for subprocess I/O
        '''
        self._path_to_ltp = path_to_ltp
        self._path_to_model = path_to_model
        self._path_to_lexicon = path_to_lexicon
        self._threads = threads
        self._encoding = encoding

    def tag_file(self, input_file_path):
        '''
        POS-tag a file of pre-segmented text.

        Builds the ``pos_cmdline`` command, runs it, and returns its raw
        standard output (kept in LTP's ``token_tag`` format so the next
        pipeline component can consume it directly).

        :param input_file_path: path to the segmented input file
        :return: raw tagged output from the LTP binary
        '''
        if self._path_to_model is None:
            self._path_to_model = os.path.join(self._path_to_ltp, 'ltp_data/pos.model')
        cws_cmdline = os.path.join(self._path_to_ltp, 'bin/examples/pos_cmdline')
        cmd = [
            cws_cmdline,
            '--input', input_file_path,
            '--threads', repr(self._threads),
            '--postagger-model', self._path_to_model,

        ]
        if self._path_to_lexicon:
            cmd.extend(['--postagger-lexicon', self._path_to_lexicon])

        stdout = self._execute(cmd)
        return stdout

    def tag(self, tokens):
        '''
        Tag a single segmented sentence.

        :param tokens: list of token strings
        :return: list(tuple(str, str)) of (token, tag) pairs
        '''
        if overridden(self.tag_sents):
            return self.tag_sents([tokens])[0]
        else:
            raise NotImplementedError()

    def tag_sents(self, sentences):
        # Tag a batch of segmented sentences (list of token lists) and
        # return a list of (token, tag) pair lists, one per sentence.
        encoding = self._encoding
        # create temporary input file
        _input_fh, self._input_file_path = tempfile.mkstemp(text=True)

        # Write the actual sentences to the temporary input file,
        # one tab-separated sentence per line.
        _input_fh = os.fdopen(_input_fh, 'wb')
        _input = '\n'.join('\t'.join(x) for x in sentences)
        if isinstance(_input, compat.text_type) and encoding:
            _input = _input.encode(encoding)
        _input_fh.write(_input)
        _input_fh.close()

        stdout = self.tag_file(self._input_file_path)

        # LTP emits "token_tag" pairs separated by tabs, sentences by newlines.
        return [[tuple(token.split('_')) for token in sent.split('\t')]
                for sent in stdout.strip().split('\n')]

    def _execute(self, cmd):
        # Run the LTP command, capturing and decoding its standard output.
        encoding = self._encoding
        stdout, _stderr = ltp_cmd(cmd, stdout=PIPE, stderr=PIPE)
        stdout = stdout.decode(encoding)
        return stdout
if __name__ == '__main__':
    # Ad-hoc manual smoke test; requires a local LTP build at path_ltp.
    sentences = [['这', '是', '哈工大', '分词器', '。'], ['哈工大', '的', '分词器', '测试']]
    path_ltp = '/home/igor/PycharmProjects/ltp'
    ltpTagger = LtpPosTagger(path_to_ltp=path_ltp)
    print(ltpTagger.tag_sents(sentences))
    print(ltpTagger.tag(['这', '是', '哈工大', '分词器', '。']))
| 30.258621 | 126 | 0.582906 |
import os
import tempfile
from subprocess import PIPE
from nltk.internals import overridden, compat
from inlp.tag.api import TaggerI
from inlp.utils import ltp_cmd
class LtpPosTagger(TaggerI):
def __init__(self, path_to_ltp, path_to_model=None, path_to_lexicon=None, threads=1,
encoding='utf8'):
self._path_to_ltp = path_to_ltp
self._path_to_model = path_to_model
self._path_to_lexicon = path_to_lexicon
self._threads = threads
self._encoding = encoding
def tag_file(self, input_file_path):
if self._path_to_model is None:
self._path_to_model = os.path.join(self._path_to_ltp, 'ltp_data/pos.model')
cws_cmdline = os.path.join(self._path_to_ltp, 'bin/examples/pos_cmdline')
cmd = [
cws_cmdline,
'--input', input_file_path,
'--threads', repr(self._threads),
'--postagger-model', self._path_to_model,
]
if self._path_to_lexicon:
cmd.extend(['--postagger-lexicon', self._path_to_lexicon])
stdout = self._execute(cmd)
return stdout
def tag(self, tokens):
if overridden(self.tag_sents):
return self.tag_sents([tokens])[0]
else:
raise NotImplementedError()
def tag_sents(self, sentences):
encoding = self._encoding
_input_fh, self._input_file_path = tempfile.mkstemp(text=True)
_input_fh = os.fdopen(_input_fh, 'wb')
_input = '\n'.join('\t'.join(x) for x in sentences)
if isinstance(_input, compat.text_type) and encoding:
_input = _input.encode(encoding)
_input_fh.write(_input)
_input_fh.close()
stdout = self.tag_file(self._input_file_path)
return [[tuple(token.split('_')) for token in sent.split('\t')]
for sent in stdout.strip().split('\n')]
def _execute(self, cmd):
encoding = self._encoding
stdout, _stderr = ltp_cmd(cmd, stdout=PIPE, stderr=PIPE)
stdout = stdout.decode(encoding)
return stdout
if __name__ == '__main__':
sentences = [['这', '是', '哈工大', '分词器', '。'], ['哈工大', '的', '分词器', '测试']]
path_ltp = '/home/igor/PycharmProjects/ltp'
ltpTagger = LtpPosTagger(path_to_ltp=path_ltp)
print(ltpTagger.tag_sents(sentences))
print(ltpTagger.tag(['这', '是', '哈工大', '分词器', '。']))
| true | true |
f726df462e44abc76e9c11946685af130da6d59c | 96 | py | Python | boa3_test/test_sc/relational_test/BoolEquality.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/relational_test/BoolEquality.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/relational_test/BoolEquality.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from boa3.builtin import public
@public
def Main(a: bool, b: bool) -> bool:
    # Smart-contract entry point: True exactly when the two booleans are equal.
    return a == b
| 13.714286 | 35 | 0.645833 | from boa3.builtin import public
@public
def Main(a: bool, b: bool) -> bool:
return a == b
| true | true |
f726e11a06f3a64832e31beeb29cda0f35f7559f | 12,310 | py | Python | tasks.py | nautobot/nautobot-plugin-chatops-aci | d5e92cbaa261e4fbcb175131d03fc6f4e63bc241 | [
"Apache-2.0"
] | null | null | null | tasks.py | nautobot/nautobot-plugin-chatops-aci | d5e92cbaa261e4fbcb175131d03fc6f4e63bc241 | [
"Apache-2.0"
] | 4 | 2021-12-01T19:20:21.000Z | 2022-02-24T22:05:18.000Z | tasks.py | nautobot/nautobot-plugin-chatops-aci | d5e92cbaa261e4fbcb175131d03fc6f4e63bc241 | [
"Apache-2.0"
] | 1 | 2022-01-06T16:37:34.000Z | 2022-01-06T16:37:34.000Z | """Tasks for use with Invoke.
(c) 2020-2021 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.util import strtobool
from invoke import Collection, task as invoke_task
import os
def is_truthy(arg):
    """Convert "truthy" strings into Booleans.

    Examples:
        >>> is_truthy('yes')
        True

    Args:
        arg (str or bool): Truthy value (True values are y, yes, t, true, on and 1;
            false values are n, no, f, false, off and 0).

    Returns:
        bool: The parsed value; a bool argument is returned unchanged.

    Raises:
        ValueError: If ``arg`` is not a bool and matches neither value set.
    """
    if isinstance(arg, bool):
        return arg
    # Inline replacement for distutils.util.strtobool: distutils is
    # deprecated (PEP 632) and removed entirely in Python 3.12, so relying
    # on it breaks this task file on modern interpreters.
    val = str(arg).lower()
    if val in ("y", "yes", "t", "true", "on", "1"):
        return True
    if val in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {arg!r}")
# Use pyinvoke configuration for default values, see http://docs.pyinvoke.org/en/stable/concepts/configuration.html
# Variables may be overwritten in invoke.yml or by the environment variables INVOKE_NAUTOBOT_PLUGIN_CHATOPS_aci_xxx
namespace = Collection("nautobot_plugin_chatops_aci")
namespace.configure(
{
"nautobot_plugin_chatops_aci": {
"nautobot_ver": "latest",
"project_name": "nautobot-plugin-chatops-aci",
"python_ver": "3.8",
"local": False,
"compose_dir": os.path.join(os.path.dirname(__file__), "development"),
"compose_files": [
"docker-compose.requirements.yml",
"docker-compose.base.yml",
"docker-compose.dev.yml",
],
}
}
)
def task(function=None, *args, **kwargs):
    """Task decorator to override the default Invoke task decorator and add each task to the invoke namespace.

    Supports both bare (``@task``) and parameterized (``@task(help=...)``)
    usage; in both forms the resulting invoke task is registered on the
    module-level ``namespace`` collection.
    """

    def task_wrapper(function=None):
        """Wrapper around invoke.task to add the task to the namespace as well."""
        if args or kwargs:
            task_func = invoke_task(*args, **kwargs)(function)
        else:
            task_func = invoke_task(function)
        namespace.add_task(task_func)
        return task_func

    if function:
        # The decorator was called with no arguments
        return task_wrapper(function)
    # The decorator was called with arguments
    return task_wrapper
def docker_compose(context, command, **kwargs):
    """Helper function for running a specific docker-compose command with all appropriate parameters and environment.

    Args:
        context (obj): Used to run specific commands
        command (str): Command string to append to the "docker-compose ..." command, such as "build", "up", etc.
        **kwargs: Passed through to the context.run() call.

    Returns:
        The invoke ``Result`` of the docker-compose invocation.
    """
    # Version pins are exported so the compose files can reference them
    # as build args / image tags.
    build_env = {
        "NAUTOBOT_VER": context.nautobot_plugin_chatops_aci.nautobot_ver,
        "PYTHON_VER": context.nautobot_plugin_chatops_aci.python_ver,
    }
    compose_command = f'docker-compose --project-name {context.nautobot_plugin_chatops_aci.project_name} --project-directory "{context.nautobot_plugin_chatops_aci.compose_dir}"'
    # Stack every configured compose file onto the command with -f, in order.
    for compose_file in context.nautobot_plugin_chatops_aci.compose_files:
        compose_file_path = os.path.join(context.nautobot_plugin_chatops_aci.compose_dir, compose_file)
        compose_command += f' -f "{compose_file_path}"'
    compose_command += f" {command}"
    print(f'Running docker-compose command "{command}"')
    return context.run(compose_command, env=build_env, **kwargs)
def run_command(context, command, **kwargs):
    """Wrapper to run a command locally or inside the nautobot container."""
    if is_truthy(context.nautobot_plugin_chatops_aci.local):
        context.run(command, **kwargs)
    else:
        # Check if nautobot is already running; no need to start another
        # nautobot container just to run a one-off command.
        docker_compose_status = "ps --services --filter status=running"
        results = docker_compose(context, docker_compose_status, hide="out")
        if "nautobot" in results.stdout:
            # Reuse the running service container.
            compose_command = f"exec nautobot {command}"
        else:
            # Spin up a throwaway container with the command as entrypoint.
            compose_command = f"run --entrypoint '{command}' nautobot"

        docker_compose(context, compose_command, pty=True)
# ------------------------------------------------------------------------------
# BUILD
# ------------------------------------------------------------------------------
@task(
help={
"force_rm": "Always remove intermediate containers",
"cache": "Whether to use Docker's cache when building the image (defaults to enabled)",
}
)
def build(context, force_rm=False, cache=True):
"""Build Nautobot docker image."""
command = "build"
if not cache:
command += " --no-cache"
if force_rm:
command += " --force-rm"
print(f"Building Nautobot with Python {context.nautobot_plugin_chatops_aci.python_ver}...")
docker_compose(context, command)
@task
def generate_packages(context):
"""Generate all Python packages inside docker and copy the file locally under dist/."""
command = "poetry build"
run_command(context, command)
# ------------------------------------------------------------------------------
# START / STOP / DEBUG
# ------------------------------------------------------------------------------
@task
def debug(context):
"""Start Nautobot and its dependencies in debug mode."""
print("Starting Nautobot in debug mode...")
docker_compose(context, "up")
@task
def start(context):
"""Start Nautobot and its dependencies in detached mode."""
print("Starting Nautobot in detached mode...")
docker_compose(context, "up --detach")
@task
def restart(context):
"""Gracefully restart all containers."""
print("Restarting Nautobot...")
docker_compose(context, "restart")
@task
def stop(context):
"""Stop Nautobot and its dependencies."""
print("Stopping Nautobot...")
docker_compose(context, "down")
@task
def destroy(context):
"""Destroy all containers and volumes."""
print("Destroying Nautobot...")
docker_compose(context, "down --volumes")
@task
def vscode(context):
"""Launch Visual Studio Code with the appropriate Environment variables to run in a container."""
command = "code nautobot.code-workspace"
context.run(command)
# ------------------------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------------------------
@task
def nbshell(context):
"""Launch an interactive nbshell session."""
command = "nautobot-server nbshell"
run_command(context, command)
@task
def cli(context):
"""Launch a bash shell inside the running Nautobot container."""
run_command(context, "bash")
@task(
help={
"user": "name of the superuser to create (default: admin)",
}
)
def createsuperuser(context, user="admin"):
"""Create a new Nautobot superuser account (default: "admin"), will prompt for password."""
command = f"nautobot-server createsuperuser --username {user}"
run_command(context, command)
@task(
help={
"name": "name of the migration to be created; if unspecified, will autogenerate a name",
}
)
def makemigrations(context, name=""):
"""Perform makemigrations operation in Django."""
command = "nautobot-server makemigrations nautobot_plugin_chatops_aci"
if name:
command += f" --name {name}"
run_command(context, command)
@task
def migrate(context):
"""Perform migrate operation in Django."""
command = "nautobot-server migrate"
run_command(context, command)
@task(help={})
def post_upgrade(context):
    """
    Performs Nautobot common post-upgrade operations using a single entrypoint.
    This will run the following management commands with default settings, in order:
    - migrate
    - trace_paths
    - collectstatic
    - remove_stale_contenttypes
    - clearsessions
    - invalidate all
    """
    run_command(context, "nautobot-server post_upgrade")
# ------------------------------------------------------------------------------
# TESTS
# ------------------------------------------------------------------------------
@task(
    help={
        "autoformat": "Apply formatting recommendations automatically, rather than failing if formatting is incorrect.",
    }
)
def black(context, autoformat=False):
    """Check Python code style with Black."""
    # When autoformatting, run black for real; otherwise only report a diff.
    black_command = "black" if autoformat else "black --check --diff"
    run_command(context, f"{black_command} .")
@task
def flake8(context):
    """Check for PEP8 compliance and other style issues."""
    run_command(context, "flake8 .")
@task
def hadolint(context):
    """Check Dockerfile for hadolint compliance and other style issues."""
    run_command(context, "hadolint development/Dockerfile")
@task
def pylint(context):
    """Run pylint code analysis."""
    run_command(
        context,
        'pylint --init-hook "import nautobot; nautobot.setup()" --rcfile pyproject.toml nautobot_plugin_chatops_aci',
    )
@task
def pydocstyle(context):
    """Run pydocstyle to validate docstring formatting adheres to NTC defined standards."""
    # We exclude the /migrations/ directory since it is autogenerated code
    run_command(context, "pydocstyle .")
@task
def yamllint(context):
    """Run yamllint to validate formatting adheres to NTC defined YAML standards.
    Args:
        context (obj): Used to run specific commands
    """
    run_command(context, "yamllint . --format standard")
@task
def bandit(context):
    """Run bandit to validate basic static code security analysis."""
    run_command(context, "bandit --recursive . --configfile .bandit.yml")
@task
def check_migrations(context):
    """Check for missing migrations."""
    run_command(
        context,
        "nautobot-server --config=nautobot/core/tests/nautobot_config.py makemigrations --dry-run --check",
    )
@task(
    help={
        "keepdb": "save and re-use test database between test runs for faster re-testing.",
        "label": "specify a directory or module to test instead of running all Nautobot tests",
        "failfast": "fail as soon as a single test fails don't run the entire test suite",
        "buffer": "Discard output from passing tests",
    }
)
def unittest(context, keepdb=False, label="nautobot_plugin_chatops_aci", failfast=False, buffer=True):
    """Run Nautobot unit tests."""
    command = f"coverage run --module nautobot.core.cli test {label}"
    # Append each optional flag only when its toggle is enabled.
    for enabled, flag in ((keepdb, " --keepdb"), (failfast, " --failfast"), (buffer, " --buffer")):
        if enabled:
            command += flag
    run_command(context, command)
@task
def unittest_coverage(context):
    """Report on code test coverage as measured by 'invoke unittest'."""
    run_command(context, "coverage report --skip-covered --include 'nautobot_plugin_chatops_aci/*' --omit *migrations*")
@task(
    help={
        "failfast": "fail as soon as a single test fails don't run the entire test suite",
    }
)
def tests(context, failfast=False):
    """Run all tests for this plugin."""
    # If we are not running locally, start the docker containers so we don't have to for each test
    if not is_truthy(context.nautobot_plugin_chatops_aci.local):
        print("Starting Docker Containers...")
        start(context)
    # Sorted loosely from fastest to slowest
    linters = (
        ("black", black),
        ("flake8", flake8),
        ("bandit", bandit),
        ("pydocstyle", pydocstyle),
        ("yamllint", yamllint),
        ("pylint", pylint),
    )
    for label, check in linters:
        print(f"Running {label}...")
        check(context)
    print("Running unit tests...")
    unittest(context, failfast=failfast)
    print("All tests have passed!")
    unittest_coverage(context)
| 31.564103 | 177 | 0.646304 |
from distutils.util import strtobool
from invoke import Collection, task as invoke_task
import os
def is_truthy(arg):
    """Convert "truthy" strings (and bools) into a real boolean.

    Args:
        arg: A boolean, or any value whose string form spells a yes/no
            answer: "y"/"yes"/"t"/"true"/"on"/"1" are True;
            "n"/"no"/"f"/"false"/"off"/"0" are False (case-insensitive).

    Returns:
        bool: the parsed value.

    Raises:
        ValueError: if the value does not spell a recognized truth value.

    Note: previously implemented with distutils.util.strtobool, which is
    deprecated (PEP 632) and removed in Python 3.12, and which crashed on
    non-string inputs such as integers.
    """
    if isinstance(arg, bool):
        return arg
    value = str(arg).strip().lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return True
    if value in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {arg!r}")
# Invoke namespace that collects every task below, configured with
# project-level defaults.
namespace = Collection("nautobot_plugin_chatops_aci")
namespace.configure(
    {
        "nautobot_plugin_chatops_aci": {
            # Nautobot docker image version to develop against
            "nautobot_ver": "latest",
            "project_name": "nautobot-plugin-chatops-aci",
            "python_ver": "3.8",
            # When True, run_command() executes on the host instead of in docker
            "local": False,
            # Directory holding the docker-compose files listed below
            "compose_dir": os.path.join(os.path.dirname(__file__), "development"),
            "compose_files": [
                "docker-compose.requirements.yml",
                "docker-compose.base.yml",
                "docker-compose.dev.yml",
            ],
        }
    }
)
def task(function=None, *args, **kwargs):
    """Decorator that wraps invoke's @task and registers the task on `namespace`.

    Supports both decoration styles: bare ``@task`` and parameterized
    ``@task(help={...})``.
    """
    def task_wrapper(function=None):
        """Apply invoke's task decorator and add the result to the namespace."""
        if args or kwargs:
            # Parameterized form: forward the extra arguments to invoke.
            task_func = invoke_task(*args, **kwargs)(function)
        else:
            task_func = invoke_task(function)
        namespace.add_task(task_func)
        return task_func
    if function:
        # Bare form: `function` is the decorated callable itself.
        return task_wrapper(function)
    # Parameterized form: return the real decorator for invoke to apply.
    return task_wrapper
def docker_compose(context, command, **kwargs):
    """Run a docker-compose command with the project's files and settings.

    Args:
        context: invoke context carrying the namespace configuration above.
        command: docker-compose sub-command string, e.g. "up --detach".
        **kwargs: passed through to context.run().
    """
    # Exported so the compose files can reference the chosen versions.
    build_env = {
        "NAUTOBOT_VER": context.nautobot_plugin_chatops_aci.nautobot_ver,
        "PYTHON_VER": context.nautobot_plugin_chatops_aci.python_ver,
    }
    compose_command = f'docker-compose --project-name {context.nautobot_plugin_chatops_aci.project_name} --project-directory "{context.nautobot_plugin_chatops_aci.compose_dir}"'
    # Stack every configured compose file onto the command line, in order.
    for compose_file in context.nautobot_plugin_chatops_aci.compose_files:
        compose_file_path = os.path.join(context.nautobot_plugin_chatops_aci.compose_dir, compose_file)
        compose_command += f' -f "{compose_file_path}"'
    compose_command += f" {command}"
    print(f'Running docker-compose command "{command}"')
    return context.run(compose_command, env=build_env, **kwargs)
def run_command(context, command, **kwargs):
    """Run a command either on the host (local mode) or inside the nautobot container."""
    if is_truthy(context.nautobot_plugin_chatops_aci.local):
        context.run(command, **kwargs)
    else:
        # Check whether the nautobot container is currently running.
        docker_compose_status = "ps --services --filter status=running"
        results = docker_compose(context, docker_compose_status, hide="out")
        if "nautobot" in results.stdout:
            # Container is up: exec inside it.
            compose_command = f"exec nautobot {command}"
        else:
            # Container is down: run the command in a one-off container.
            compose_command = f"run --entrypoint '{command}' nautobot"
        docker_compose(context, compose_command, pty=True)
@task(
    help={
        "force_rm": "Always remove intermediate containers",
        "cache": "Whether to use Docker's cache when building the image (defaults to enabled)",
    }
)
def build(context, force_rm=False, cache=True):
    """Build the Nautobot docker image via docker-compose."""
    command = "build"
    if not cache:
        command += " --no-cache"
    if force_rm:
        command += " --force-rm"
    print(f"Building Nautobot with Python {context.nautobot_plugin_chatops_aci.python_ver}...")
    docker_compose(context, command)
@task
def generate_packages(context):
    """Build the plugin's python packages with poetry."""
    command = "poetry build"
    run_command(context, command)
# ------------------------------------------------------------------------------
# START / STOP / DEBUG
# ------------------------------------------------------------------------------
@task
def debug(context):
    """Start Nautobot and its dependencies in debug (attached) mode."""
    print("Starting Nautobot in debug mode...")
    docker_compose(context, "up")
@task
def start(context):
    """Start Nautobot and its dependencies in detached mode."""
    print("Starting Nautobot in detached mode...")
    docker_compose(context, "up --detach")
@task
def restart(context):
    """Gracefully restart all containers."""
    print("Restarting Nautobot...")
    docker_compose(context, "restart")
@task
def stop(context):
    """Stop Nautobot and its dependencies."""
    print("Stopping Nautobot...")
    docker_compose(context, "down")
@task
def destroy(context):
    """Destroy all containers and volumes."""
    print("Destroying Nautobot...")
    docker_compose(context, "down --volumes")
@task
def vscode(context):
    """Launch Visual Studio Code with the project workspace file."""
    command = "code nautobot.code-workspace"
    context.run(command)
# ------------------------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------------------------
@task
def nbshell(context):
    """Launch an interactive nbshell session."""
    command = "nautobot-server nbshell"
    run_command(context, command)
@task
def cli(context):
    """Launch a bash shell inside the running Nautobot container."""
    run_command(context, "bash")
@task(
    help={
        "user": "name of the superuser to create (default: admin)",
    }
)
def createsuperuser(context, user="admin"):
    """Create a new Nautobot superuser account; prompts for the password."""
    command = f"nautobot-server createsuperuser --username {user}"
    run_command(context, command)
@task(
    help={
        "name": "name of the migration to be created; if unspecified, will autogenerate a name",
    }
)
def makemigrations(context, name=""):
    """Perform makemigrations operation in Django."""
    command = "nautobot-server makemigrations nautobot_plugin_chatops_aci"
    if name:
        command += f" --name {name}"
    run_command(context, command)
@task
def migrate(context):
    """Perform migrate operation in Django."""
    command = "nautobot-server migrate"
    run_command(context, command)
@task(help={})
def post_upgrade(context):
    """Run Nautobot's common post-upgrade operations via a single entrypoint."""
    command = "nautobot-server post_upgrade"
    run_command(context, command)
# ------------------------------------------------------------------------------
# TESTS
# ------------------------------------------------------------------------------
@task(
    help={
        "autoformat": "Apply formatting recommendations automatically, rather than failing if formatting is incorrect.",
    }
)
def black(context, autoformat=False):
    """Check Python code style with Black."""
    if autoformat:
        black_command = "black"
    else:
        # Report-only mode: show a diff and fail if reformatting is needed.
        black_command = "black --check --diff"
    command = f"{black_command} ."
    run_command(context, command)
@task
def flake8(context):
    """Check for PEP8 compliance and other style issues."""
    command = "flake8 ."
    run_command(context, command)
@task
def hadolint(context):
    """Check the development Dockerfile with hadolint."""
    command = "hadolint development/Dockerfile"
    run_command(context, command)
@task
def pylint(context):
    """Run pylint code analysis over the plugin package."""
    command = (
        'pylint --init-hook "import nautobot; nautobot.setup()" --rcfile pyproject.toml nautobot_plugin_chatops_aci'
    )
    run_command(context, command)
@task
def pydocstyle(context):
    """Run pydocstyle to validate docstring formatting."""
    # We exclude the /migrations/ directory since it is autogenerated code
    command = "pydocstyle ."
    run_command(context, command)
@task
def yamllint(context):
    """Run yamllint to validate YAML formatting."""
    command = "yamllint . --format standard"
    run_command(context, command)
@task
def bandit(context):
    """Run bandit for basic static code security analysis."""
    command = "bandit --recursive . --configfile .bandit.yml"
    run_command(context, command)
@task
def check_migrations(context):
    """Check for missing migrations."""
    command = "nautobot-server --config=nautobot/core/tests/nautobot_config.py makemigrations --dry-run --check"
    run_command(context, command)
@task(
    help={
        "keepdb": "save and re-use test database between test runs for faster re-testing.",
        "label": "specify a directory or module to test instead of running all Nautobot tests",
        "failfast": "fail as soon as a single test fails don't run the entire test suite",
        "buffer": "Discard output from passing tests",
    }
)
def unittest(context, keepdb=False, label="nautobot_plugin_chatops_aci", failfast=False, buffer=True):
    """Run Nautobot unit tests under coverage."""
    command = f"coverage run --module nautobot.core.cli test {label}"
    if keepdb:
        command += " --keepdb"
    if failfast:
        command += " --failfast"
    if buffer:
        command += " --buffer"
    run_command(context, command)
@task
def unittest_coverage(context):
    """Report on code test coverage as measured by 'invoke unittest'."""
    command = "coverage report --skip-covered --include 'nautobot_plugin_chatops_aci/*' --omit *migrations*"
    run_command(context, command)
@task(
    help={
        "failfast": "fail as soon as a single test fails don't run the entire test suite",
    }
)
def tests(context, failfast=False):
    """Run every linter and then the unit-test suite for this plugin."""
    # If we are not running locally, start the docker containers so we don't have to for each test
    if not is_truthy(context.nautobot_plugin_chatops_aci.local):
        print("Starting Docker Containers...")
        start(context)
    print("Running black...")
    black(context)
    print("Running flake8...")
    flake8(context)
    print("Running bandit...")
    bandit(context)
    print("Running pydocstyle...")
    pydocstyle(context)
    print("Running yamllint...")
    yamllint(context)
    print("Running pylint...")
    pylint(context)
    print("Running unit tests...")
    unittest(context, failfast=failfast)
    print("All tests have passed!")
    unittest_coverage(context)
| true | true |
f726e32c037672a3a1015b66f43061d44ada00cc | 1,226 | py | Python | tryalgo/dist_grid.py | Shloub/tryalgo | ec01a16dd6a6053047f1948531bd5e9b2abf0fab | [
"MIT"
] | null | null | null | tryalgo/dist_grid.py | Shloub/tryalgo | ec01a16dd6a6053047f1948531bd5e9b2abf0fab | [
"MIT"
] | null | null | null | tryalgo/dist_grid.py | Shloub/tryalgo | ec01a16dd6a6053047f1948531bd5e9b2abf0fab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Distances in a grid
# jill-jenn vie et christoph durr - 2014-2015
from collections import deque
# snip{
def dist_grid(grid, source, target=None):
    """Distances in a grid by BFS

    Cells are marked in place: 's' at the source, 't' at the target (if it is
    reached), and '<', '>', '^', 'v' showing the direction each free cell was
    entered from.

    :param grid: matrix with 4-neighborhood; free cells contain ' '
    :param (int,int) source: pair of row, column indices
    :param (int,int) target: exploration stops if target is reached
    :complexity: linear in grid size
    """
    rows = len(grid)
    cols = len(grid[0])
    # Candidate moves: (row offset, column offset, marker symbol).
    # Renamed from `dir`, which shadowed the builtin of the same name.
    directions = [(0, +1, '>'), (0, -1, '<'), (+1, 0, 'v'), (-1, 0, '^')]
    i, j = source
    grid[i][j] = 's'
    Q = deque()
    Q.append(source)
    while Q:
        i1, j1 = Q.popleft()
        for di, dj, symbol in directions:  # explore all directions
            i2 = i1 + di
            j2 = j1 + dj
            if not (0 <= i2 < rows and 0 <= j2 < cols):
                continue  # outside the grid
            if grid[i2][j2] != ' ':  # inaccessible or already visited
                continue
            grid[i2][j2] = symbol  # mark as visited
            if (i2, j2) == target:
                grid[i2][j2] = 't'  # target reached
                return
            Q.append((i2, j2))
# snip}
| 30.65 | 69 | 0.513866 |
from collections import deque
def dist_grid(grid, source, target=None):
    """BFS exploration of a grid with 4-neighborhood, marking cells in place.

    The source cell is marked 's', the target (if reached) 't', and every
    other visited cell gets '<', '>', '^' or 'v' showing the direction it
    was entered from. Free cells are expected to contain ' '.
    """
    rows = len(grid)
    cols = len(grid[0])
    # candidate moves: (row offset, column offset, marker symbol)
    dir = [(0, +1, '>'), (0, -1, '<'), (+1, 0, 'v'), (-1, 0, '^')]
    i, j = source
    grid[i][j] = 's'
    Q = deque()
    Q.append(source)
    while Q:
        i1, j1 = Q.popleft()
        for di, dj, symbol in dir:  # explore all four directions
            i2 = i1 + di
            j2 = j1 + dj
            if not (0 <= i2 and i2 < rows and 0 <= j2 and j2 < cols):
                continue  # outside the grid
            if grid[i2][j2] != ' ':
                continue  # inaccessible or already visited
            grid[i2][j2] = symbol  # mark as visited
            if (i2, j2) == target:
                grid[i2][j2] = 't'  # target reached, stop exploring
                return
            Q.append((i2, j2))
| true | true |
f726e3fffed7bf64ee84e30593164304e7fa5261 | 83,112 | py | Python | genepattern/utils/clustering.py | genepattern/genepattern-utils | 950d748301b3c4d07ad8d24c9b037bbb9b4c80e2 | [
"BSD-3-Clause"
] | null | null | null | genepattern/utils/clustering.py | genepattern/genepattern-utils | 950d748301b3c4d07ad8d24c9b037bbb9b4c80e2 | [
"BSD-3-Clause"
] | null | null | null | genepattern/utils/clustering.py | genepattern/genepattern-utils | 950d748301b3c4d07ad8d24c9b037bbb9b4c80e2 | [
"BSD-3-Clause"
] | null | null | null | """
Copied and modified from the dev branch of:
https://github.com/genepattern/HierarchicalClustering
on 2018-01-31
"""
import sys
import numpy as np
from statistics import mode
from sklearn.metrics import pairwise
from sklearn import metrics
from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import itertools
from sklearn.cluster import AgglomerativeClustering
import scipy
import itertools
from collections import defaultdict
from .elemental import *
from .information import *
# check if these are repeated:
import os
import sys
tasklib_path = os.path.dirname(os.path.realpath(sys.argv[0]))
# sys.path.append(tasklib_path + "/ccalnoir")
# 2018-02-06 Maybe uncomment these next two
# import matplotlib as mpl
# mpl.use('Agg')
# This is forprinting the hyperlink
from IPython.core.display import display, HTML
# import pandas as pd
# import numpy as np
import scipy
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import gridspec
from sklearn.cluster import AgglomerativeClustering
# from time import time
# import cuzcatlan as cusca
sns.set_style("white")  # plain white background for all seaborn plots
import matplotlib as mpl
# Global plot typography: enlarge tick labels, titles and axis labels.
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['axes.titlesize'] = 24
mpl.rcParams['axes.labelsize'] = 20
# Number of significant digits used when rounding reported values.
SIGNIFICANT_DIGITS = 7
# Spellings accepted for the distance metrics. GUI labels, GpUnit numeric
# codes and command-line names all map onto the canonical internal metric
# identifiers. The row/column tables differ only in their "no clustering"
# entries, so the shared aliases are defined once.
_SHARED_DISTANCE_ALIASES = {
    # GUI labels
    "Uncentered correlation": "uncentered_pearson",
    "Pearson correlation": "pearson",
    "Uncentered correlation, absolute value": "absolute_uncentered_pearson",
    "Pearson correlation, absolute value": "absolute_pearson",
    "Spearman's rank correlation": "spearman",
    "Kendall's tau": "kendall",
    "Euclidean distance": "euclidean",
    "City-block distance": "manhattan",
    # GpUnit numeric codes
    "1": "uncentered_pearson",
    "2": "pearson",
    "3": "absolute_uncentered_pearson",
    "4": "absolute_pearson",
    "5": "spearman",
    "6": "kendall",
    "7": "euclidean",
    "8": "manhattan",
    "9": "information_coefficient",
    # command-line names (identity mappings)
    "uncentered_pearson": "uncentered_pearson",
    "pearson": "pearson",
    "absolute_uncentered_pearson": "absolute_uncentered_pearson",
    "absolute_pearson": "absolute_pearson",
    "spearman": "spearman",
    "kendall": "kendall",
    "euclidean": "euclidean",
    "manhattan": "manhattan",
    "Cosine": "cosine",
    "cosine": "cosine",
    "ic": "information_coefficient",
    "information_coefficient": "information_coefficient",
    "Information Coefficient": "information_coefficient",
}

input_col_distance_dict = dict(
    _SHARED_DISTANCE_ALIASES,
    **{
        "No column clustering": "No_column_clustering",
        "No_column_clustering": "No_column_clustering",
        "0": "No_column_clustering",
        "no_col": "No_column_clustering",
    },
)

input_row_distance_dict = dict(
    _SHARED_DISTANCE_ALIASES,
    **{
        "No row clustering": "No_row_clustering",
        "No_row_clustering": "No_row_clustering",
        "0": "No_row_clustering",
        "no_row": "No_row_clustering",
    },
)

# Linkage methods: GUI labels plus the single-letter GpUnit codes.
input_clustering_method = {
    'Pairwise complete-linkage': 'complete',
    'Pairwise average-linkage': 'average',
    'Pairwise ward-linkage': 'ward',
    'm': 'complete',
    'a': 'average',  # average is the usual default
}

# Row/column centering options share their short spellings.
_SHARED_CENTERING = {
    'No': None,
    'None': None,
    'Median': 'Median',
    'Mean': 'Mean',
}
input_row_centering = dict(
    _SHARED_CENTERING,
    **{
        'Subtract the mean from each row': 'Mean',
        'Subtract the median from each row': 'Median',
    },
)
input_col_centering = dict(
    _SHARED_CENTERING,
    **{
        'Subtract the mean from each column': 'Mean',
        'Subtract the median from each column': 'Median',
    },
)

# Row/column normalization flags accept identical spellings.
_SHARED_NORMALIZE = {
    'No': False,
    'Yes': True,
    'False': False,
    'True': True,
}
input_row_normalize = dict(_SHARED_NORMALIZE)
input_col_normalize = dict(_SHARED_NORMALIZE)
def parse_inputs(args=sys.argv):
    """Parse the command-line arguments for hierarchical clustering.

    Positional arguments (everything after the GCT file name is optional):
        1.  gct_name: input GCT file (required)
        2.  col_distance_metric (default 'euclidean')
        3.  output_distances: whether to compute and write the distance
            matrix (default False; only 'False'/'false'/'F'/'f' mean False)
        4.  row_distance_metric (default: no row clustering)
        5.  clustering_method (default: 'Pairwise average-linkage')
        6.  output_base_name (default 'HC_out')
        7.  row_normalization (default False)
        8.  col_normalization (default False)
        9.  row_centering (default None)
        10. col_centering (default None)

    Provided values are translated from their GUI / GpUnit / command-line
    spellings to canonical internal names via the module-level input_*
    dictionaries; unknown spellings raise KeyError.

    Returns a 10-tuple:
        (gct_name, col_distance_metric, output_distances, row_distance_metric,
         clustering_method, output_base_name, row_normalization,
         col_normalization, row_centering, col_centering)
    """
    arg_n = len(args)
    if arg_n == 1:
        sys.exit("Not enough parameters files were provided. This module needs a GCT file to work.")
    if arg_n > 11:
        sys.exit("Too many inputs. This module needs only a GCT file to work, "
                 "plus an optional input choosing between Pearson Correlation or Information Coefficient.")

    # Defaults; each is overridden below only when the argument was provided.
    gct_name = args[1]
    col_distance_metric = 'euclidean'
    output_distances = False
    row_distance_metric = 'No_row_clustering'
    clustering_method = 'Pairwise average-linkage'
    output_base_name = 'HC_out'
    row_normalization = False
    col_normalization = False
    row_centering = None
    col_centering = None

    if arg_n > 2:
        col_distance_metric = input_col_distance_dict[args[2]]
    if arg_n > 3:
        # Historical behavior: only these four exact spellings mean False.
        output_distances = args[3] not in ('False', 'F', 'false', 'f')
    if arg_n > 4:
        row_distance_metric = input_row_distance_dict[args[4]]
    if arg_n > 5:
        clustering_method = input_clustering_method[args[5]]
        # Ward linkage is only supported together with the 'average' metric.
        if clustering_method == 'ward' and col_distance_metric != 'average':
            exit("When choosing 'Pairwise ward-linkage' the distance metric *must* be 'average' ")
    if arg_n > 6:
        output_base_name = args[6]
    if arg_n > 7:
        row_normalization = input_row_normalize[args[7]]
    if arg_n > 8:
        col_normalization = input_col_normalize[args[8]]
    if arg_n > 9:
        # Bug fix: the previous code tested and clobbered col_normalization
        # here instead of row_centering. The input_row_centering table
        # already maps the 'None'/'No' spellings to None.
        row_centering = input_row_centering[args[9]]
    if arg_n > 10:
        col_centering = input_col_centering[args[10]]

    # Echo the effective settings; values left at their defaults are flagged.
    settings = [
        ("gct_name", gct_name),
        ("col_distance_metric", col_distance_metric),
        ("output_distances", output_distances),
        ("row_distance_metric", row_distance_metric),
        ("clustering_method", clustering_method),
        ("output_base_name", output_base_name),
        ("row_normalization", row_normalization),
        ("col_normalization", col_normalization),
        ("row_centering", row_centering),
        ("col_centering", col_centering),
    ]
    print("Using:")
    for position, (label, value) in enumerate(settings, start=1):
        suffix = "" if arg_n > position else " (default)"
        print("\t{} = {}{}".format(label, value, suffix))
    print(args)

    return gct_name, col_distance_metric, output_distances, row_distance_metric, clustering_method, \
        output_base_name, row_normalization, col_normalization, row_centering, col_centering
def plot_dendrogram(model, data, tree, axis, dist=mydist, clustering_method='average',
                    title='no_title.png', color_threshold=None, orientation='top', **kwargs):
    """Plot a SciPy dendrogram for a fitted sklearn agglomerative-clustering model.

    Builds a linkage matrix from ``model.children_`` plus per-merge values from
    ``better_dendodist``, then delegates to ``scipy.cluster.hierarchy.dendrogram``.
    Modified from https://github.com/scikit-learn/scikit-learn/pull/3464/files

    :param model: fitted sklearn AgglomerativeClustering model (``children_`` is read).
    :param data: the matrix that was clustered.
    :param tree: node -> (child, child) dict as produced by ``make_tree``.
    :param axis: 0 to compare rows, 1 to compare columns (passed through to
        ``better_dendodist``).
    :param dist: similarity function used between cluster members.
    :param clustering_method: 'average' or 'complete' (see ``centroid_distances``).
    :param title: currently unused (figure saving is disabled).
    :param color_threshold: desired number of highlighted clusters; translated
        below into a height cut on the linkage matrix.
    :param orientation: dendrogram orientation ('top', 'left', ...).
    :return: tuple (order of leaf labels as plotted, linkage matrix).
    """
    # Children of hierarchical clustering
    children = model.children_
    # Per-merge similarity between the two clusters joined at each step.
    # TODO: Fix this mydist
    og_distances = better_dendodist(children, dist, tree, data, axis=axis, clustering_method=clustering_method)
    # Turn similarity into non-negative value Scipy's dendrogram needs this
    if dist in [custom_euclidean_sim, absolute_uncentered_pearson_corr, absolute_pearson_corr]:
        # These similarities are already nonnegative [0,inf) or [0,1]
        pass
    else:  # all the correlation-style similarities lie in [-1, 1]; shift to [0, 2]
        og_distances = [temp + 1 for temp in og_distances]
    # Now that all similarities are nonnegative, invert them into a distance
    # for plotting purposes.
    # NOTE(review): a similarity of exactly 0 would raise ZeroDivisionError
    # here -- confirm inputs cannot produce it.
    og_distances = [1 / temp for temp in og_distances]
    # Cumulative sum makes the merge heights monotonically increasing, as
    # scipy's dendrogram expects.
    distance = np.cumsum(og_distances)
    list_of_children = list(get_children(tree, leaves_are_self_children=False).values())
    no_of_observations = [len(i) for i in list_of_children if i]
    # get_children() omits the root node (see its review note), so append a
    # count for the final merge here.
    # NOTE(review): this appends len+1, not the total leaf count -- verify.
    no_of_observations.append(len(no_of_observations) + 1)
    if all(value == 0 for value in distance):
        # If all distances are zero, then use uniform distance
        distance = np.arange(len(distance))
    # Create linkage matrix and then plot the dendrogram
    linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)
    # Find the height at which to cut the dendrogram so that exactly
    # `color_threshold` clusters get distinct colors.
    if color_threshold is not None:
        if color_threshold == 1:
            color_threshold = 2
        if color_threshold > (len(linkage_matrix) + 1):
            color_threshold = (len(linkage_matrix) + 1)
        # Subtract eps so the cut falls just below the chosen merge height.
        color_threshold = linkage_matrix[-(color_threshold - 1)][2] - np.finfo(float).eps
    R = dendrogram(linkage_matrix, color_threshold=color_threshold, orientation=orientation, **kwargs)
    # 'ivl' holds the leaf labels in plotting order.
    order_of_columns = R['ivl']
    return order_of_columns, linkage_matrix
def get_clusters(tree):
    """Placeholder for cluster extraction; currently a no-op returning None."""
    return None
def get_cluster_classes(den, label='ivl'):
    """Inspect a scipy dendrogram dict and print its component sizes.

    Based on http://www.nxn.se/valent/extract-cluster-elements-by-color-in-python
    The color-based extraction is currently disabled; the function only emits
    diagnostics and returns two empty mappings.

    :param den: dict returned by scipy.cluster.hierarchy.dendrogram.
    :param label: key of the leaf-label list (kept for API compatibility).
    :return: tuple (clusters2idxs, idxs2clusters) -- both currently empty.
    """
    by_cluster = defaultdict(list)
    by_index = {}
    # Diagnostics: key set and sizes of the dendrogram's coordinate/label arrays.
    print(den.keys())
    print(len(den['icoord']))
    print(len(den['dcoord']))
    print(len(den['ivl']))
    print(len(den['leaves']))
    print(den['leaves'])
    print(len(den['color_list']))
    print(den['color_list'])
    return by_cluster, by_index
def order_leaves(model, data, tree, labels, axis=0, dist=mydist, reverse=False):
    """Order leaf labels according to the merge structure of a fitted model.

    Adapted from: https://stackoverflow.com/questions/12572436/calculate-ordering-of-dendrogram-leaves

    :param model: fitted sklearn agglomerative model (only ``children_`` is used).
    :param data: unused; kept for signature compatibility.
    :param tree: unused; kept for signature compatibility.
    :param labels: sequence of leaf labels, indexed by leaf id.
    :param axis: unused; kept for signature compatibility.
    :param dist: unused; kept for signature compatibility.
    :param reverse: if True, return the order reversed.
    :return: list of labels in dendrogram leaf order.
    """
    merges = np.column_stack([model.children_]).astype(float)
    n_leaves = len(merges) + 1
    # Walk the merge list, accumulating the leaf ids under each internal node.
    members = dict()
    for step in range(len(merges)):
        left, right = int(merges[step][0]), int(merges[step][1])
        left_ids = [left] if left < n_leaves else members.pop(left)
        right_ids = [right] if right < n_leaves else members.pop(right)
        members[n_leaves + step] = left_ids + right_ids
    leaf_order = members[2 * len(merges)]
    if reverse:
        leaf_order = list(reversed(leaf_order))
    return [labels[i] for i in leaf_order]
def two_plot_two_dendrogram(model, dist=mydist, **kwargs):
    """Plot a left-oriented dendrogram for `model` and return the row order.

    Modified from https://github.com/scikit-learn/scikit-learn/pull/3464/files

    :param model: fitted sklearn agglomerative model (``children_`` is read).
    :param dist: distance function handed to ``dendodist``.
    :return: leaf labels ordered top-to-bottom (reversed plotting order).
    """
    merges = model.children_
    # Heights between each merged pair of clusters.
    heights = dendodist(merges, dist)
    if all(h == 0 for h in heights):
        # Degenerate case: fall back to uniform spacing.
        heights = np.arange(len(heights))
    # In this simple scheme, merge step k combines k+2 observations.
    counts = np.arange(2, merges.shape[0] + 2)
    # Assemble the linkage matrix scipy's dendrogram expects.
    linkage_matrix = np.column_stack([merges, heights, counts]).astype(float)
    R = dendrogram(linkage_matrix, color_threshold=0, orientation='left', **kwargs)
    row_order = R['ivl']
    plt.gca().get_xaxis().set_visible(False)
    return list(reversed(row_order))
def my_affinity_generic(M, metric):
    """Full pairwise matrix of `metric` applied to the rows of M.

    :param M: iterable of row vectors.
    :param metric: callable taking two rows and returning a scalar.
    :return: ndarray where entry [j][i] == metric(M[i], M[j]).
    """
    rows = [np.array([metric(u, v) for u in M]) for v in M]
    return np.array(rows)
def my_affinity_i(M):
    """Pairwise information-coefficient distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, information_coefficient_dist)
def my_affinity_ai(M):
    """Pairwise absolute information-coefficient distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, absolute_information_coefficient_dist)
def my_affinity_p(M):
    """Pairwise Pearson distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, custom_pearson_dist)
def my_affinity_s(M):
    """Pairwise Spearman distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, custom_spearman_dist)
def my_affinity_k(M):
    """Pairwise Kendall-tau distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, custom_kendall_tau_dist)
def my_affinity_ap(M):
    """Pairwise absolute-Pearson distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, absolute_pearson_dist)
def my_affinity_u(M):
    """Pairwise uncentered-Pearson distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, uncentered_pearson_dist)
def my_affinity_au(M):
    """Pairwise absolute uncentered-Pearson distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, absolute_uncentered_pearson_dist)
def my_affinity_l1(M):
    """Pairwise L1 (Manhattan) distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, custom_manhattan_dist)
def my_affinity_l2(M):
    """Pairwise L2 (Euclidean) distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, custom_euclidean_dist)
def my_affinity_m(M):
    """Pairwise Manhattan distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, custom_manhattan_dist)
def my_affinity_c(M):
    """Pairwise cosine distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, custom_cosine_dist)
def my_affinity_e(M):
    """Pairwise custom-Euclidean distance matrix for the rows of M.

    Delegates to my_affinity_generic to avoid duplicating the pairwise loop.
    """
    return my_affinity_generic(M, custom_euclidean_dist)
def count_diff(x):
    """Count how many elements of `x` differ from its first element.

    Bug fix: the original indexed x[0] unconditionally and raised IndexError
    on an empty sequence; an empty input now returns 0.

    :param x: sequence (list or 1-D array); may be empty.
    :return: number of elements not equal to x[0] (0 for an empty input).
    """
    if len(x) == 0:
        return 0
    reference = x[0]
    return sum(1 for item in x if item != reference)
def count_mislabels(labels, true_labels):
    """Count mislabeled points for a two-cluster assignment.

    2017-08-17: I will make the assumption that clusters have only 2 values.

    Splits `labels` by the binary ground truth, takes the majority label of
    the larger group as that group's expected label and the opposite value
    for the smaller group, then counts disagreements.

    :param labels: array of predicted cluster labels (assumed binary 0/1).
    :param true_labels: array of ground-truth labels (assumed binary 0/1).
    :return: total count of entries disagreeing with their group's expected label.
    """
    # NOTE(review): `mode` is defined elsewhere in this file/its imports; this
    # code assumes it returns a plain scalar 0 or 1 -- confirm its definition.
    set_a = labels[true_labels == 0]
    set_b = labels[true_labels == 1]
    # Identify the larger group; on a tie, set_b is treated as the longer one.
    if len(set_a) <= len(set_b):
        shorter = set_a
        longer = set_b
    else:
        shorter = set_b
        longer = set_a
    long_mode = mode(longer)  # this is what the label of the longer cluster should be.
    short_mode = 1 if long_mode == 0 else 0  # Choose the other value for the label of the shorter cluster
    return np.count_nonzero(longer != long_mode) + np.count_nonzero(shorter != short_mode)
def plot_heatmap(df, col_order, row_order, top=5, title_text='differentially expressed genes per phenotype'):
if not (len(col_order), len(list(df))):
exit("Number of columns in dataframe do not match the columns provided for ordering.")
if not (len(row_order), len(df)):
exit("Number of rows in dataframe do not match the columns provided for ordering.")
# print(list(df), col_order)
df = df[col_order]
df = df.reindex(row_order)
plt.clf()
sns.heatmap(df.iloc[np.r_[0:top, -top:0], :], cmap='viridis')
plt.yticks(rotation=0)
plt.xticks(rotation=90)
plt.title('Top {} {}'.format(top, title_text))
plt.ylabel('Genes')
plt.xlabel('Sample')
plt.savefig('heatmap.png', dpi=300, bbox_inches="tight")
def parse_data(gct_name, row_normalization=False, col_normalization=False, row_centering=None, col_centering=None):
    """Load a GCT file (or DataFrame) and produce raw plus normalized matrices.

    :param gct_name: path/URL of a .gct file, or an already-loaded DataFrame.
    :param row_normalization: whether to normalize each row (gene).
    :param col_normalization: whether to normalize each column (sample).
    :param row_centering: 'Mean', 'Median', or None.
    :param col_centering: 'Mean', 'Median', or None.
    :return: (og_data, data_df, data, new_data_df, plot_labels, row_labels,
              og_full_gct, new_full_gct)
    """
    try:
        data_df = pd.read_csv(gct_name, sep='\t', skiprows=2)
    except ValueError:
        # Input was already a DataFrame -- use it as-is.
        data_df = gct_name
    # Bug fix: the original used `is 'Name'`, an identity test against a string
    # literal, which is not a reliable equality check.
    if data_df.index.name == 'Name':
        data_df['Name'] = data_df.index
    else:
        if 'Name' not in list(data_df):
            data_df['Name'] = data_df.iloc[:, 0]
            data_df.drop(data_df.columns[0], axis=1, inplace=True)
    if 'Description' not in list(data_df):
        data_df['Description'] = data_df['Name']
    data_df.set_index(data_df['Name'], inplace=True)
    og_full_gct = data_df.copy()
    og_full_gct.drop(['Name'], axis=1, inplace=True)
    data_df.drop(['Name', 'Description'], axis=1, inplace=True)
    plot_labels = list(og_full_gct.drop(['Description'], axis=1, inplace=False))
    # `.values` replaces the deprecated `.as_matrix()` (removed in pandas >= 0.25).
    data = data_df.values
    row_labels = data_df.index.values
    og_data = data.copy()
    # Centering/normalization is delegated entirely to normalize_dataframe.
    data = normalize_dataframe(data_df, log_normalize=None,
                               row_centering=row_centering, row_normalization=row_normalization,
                               col_centering=col_centering, col_normalization=col_normalization).values
    new_data_df = pd.DataFrame(data=data, index=data_df.index, columns=list(data_df))
    new_full_gct = new_data_df.copy()
    new_full_gct.insert(0, column='Description', value=og_full_gct['Description'])
    return og_data, data_df, data, new_data_df, plot_labels, row_labels, og_full_gct, new_full_gct
# Affinity argument handed to sklearn's AgglomerativeClustering: custom metrics
# map to a callable that builds the full pairwise matrix, while metrics sklearn
# supports natively stay as plain strings.
str2func = {
    'custom_euclidean': my_affinity_e,
    'uncentered_pearson': my_affinity_u,
    'absolute_uncentered_pearson': my_affinity_au,
    'information_coefficient': my_affinity_i,
    'pearson': my_affinity_p,
    'spearman': my_affinity_s,
    'kendall': my_affinity_k,
    'absolute_pearson': my_affinity_ap,
    'l1': 'l1',
    'l2': 'l2',
    'manhattan': 'manhattan',
    'cosine': 'cosine',
    'euclidean': 'euclidean',
}
# Same mapping but with every metric as an explicit pairwise-matrix callable
# (used when an actual affinity matrix must be computed).
str2affinity_func = {
    'custom_euclidean': my_affinity_e,
    'uncentered_pearson': my_affinity_u,
    'absolute_uncentered_pearson': my_affinity_au,
    'information_coefficient': my_affinity_i,
    'pearson': my_affinity_p,
    'spearman': my_affinity_s,
    'kendall': my_affinity_k,
    'absolute_pearson': my_affinity_ap,
    'l1': my_affinity_l1,
    'l2': my_affinity_l2,
    'manhattan': my_affinity_m,
    'cosine': my_affinity_c,
    'euclidean': my_affinity_e,
}
# Metric name -> element-wise distance function (two vectors -> scalar).
str2dist = {
    'custom_euclidean': custom_euclidean_dist,
    'uncentered_pearson': uncentered_pearson_dist,
    'absolute_uncentered_pearson': absolute_uncentered_pearson_dist,
    'information_coefficient': information_coefficient_dist,
    'pearson': custom_pearson_dist,
    'spearman': custom_spearman_dist,
    'kendall': custom_kendall_tau_dist,
    'absolute_pearson': absolute_pearson_dist,
    'l1': custom_manhattan_dist,
    'l2': custom_euclidean_dist,
    'manhattan': custom_manhattan_dist,
    'cosine': custom_cosine_dist,
    'euclidean': custom_euclidean_dist,
}
# Metric name -> element-wise similarity function (two vectors -> scalar).
str2similarity = {
    'custom_euclidean': custom_euclidean_sim,
    'uncentered_pearson': uncentered_pearson_corr,
    'absolute_uncentered_pearson': absolute_uncentered_pearson_corr,
    'information_coefficient': information_coefficient,
    'pearson': custom_pearson_corr,
    'spearman': custom_spearman_corr,
    'kendall': custom_kendall_tau_corr,
    'absolute_pearson': absolute_pearson_corr,
    'l1': custom_manhattan_sim,
    'l2': custom_euclidean_sim,
    'manhattan': custom_manhattan_sim,
    'cosine': custom_cosine_sim,
    'euclidean': custom_euclidean_sim,
}
# Human-readable linkage labels (and their short forms) -> scipy/sklearn names.
linkage_dic = {
    'Pairwise average-linkage': 'average',
    'Pairwise complete-linkage': 'complete',
    'Pairwise ward-linkage': 'ward',
    'average': 'average',
    'complete': 'complete',
    'ward': 'ward',
}
def make_tree(model, data=None):
    """Build a node-id -> (child, child) mapping from a fitted sklearn model.

    Modified from:
    https://stackoverflow.com/questions/27386641/how-to-traverse-a-tree-from-sklearn-agglomerativeclustering

    Internal node ids start at ``model.n_leaves_`` (ids below that are leaves),
    matching sklearn's ``children_`` numbering convention.

    :param model: fitted AgglomerativeClustering model.
    :param data: unused; kept for signature compatibility.
    :return: dict where each key is an internal node id and the value is the
        pair of ids of its children.
    """
    first_internal_id = model.n_leaves_
    return dict(enumerate(model.children_, first_internal_id))
def make_cdt(data, order_of_columns, order_of_rows, name='test.cdt', atr_companion=True, gtr_companion=False):
    """Write `data` as a .cdt (clustered data table) file for TreeView-style viewers.

    :param data: DataFrame with a 'Description' column; the index holds gene ids.
        NOTE: mutated in place (renamed columns, inserted GWEIGHT, etc.).
    :param order_of_columns: column order produced by the column dendrogram.
    :param order_of_rows: row order produced by the row dendrogram.
    :param name: output filename.
    :param atr_companion: whether an .atr (column tree) file accompanies this .cdt.
    :param gtr_companion: whether a .gtr (row tree) file accompanies this .cdt.
    """
    # TODO: if order_of_columns == None, then do arange(len(list(data)))
    # TODO: if order_of_rows == None, then do arange(len(list(data)))
    data.index.name = "ID"
    data.rename(columns={'Description': 'Name'}, inplace=True)
    temp = np.ones(len(data))
    data.insert(loc=1, column='GWEIGHT', value=temp)  # adding an extra column
    # These three lines add an EWEIGHT row (all ones) and move it to the top.
    data.loc['EWEIGHT'] = list(np.ones(len(list(data))))
    newIndex = ['EWEIGHT'] + [ind for ind in data.index if ind != 'EWEIGHT']
    data = data.reindex(index=newIndex)
    if atr_companion:
        # The AID row links each column to an ARRY node in the .atr file.
        new_AID = ['', '']
        for element in range(len(order_of_columns)):
            temp = 'ARRY' + str(element) + 'X'
            new_AID.append(temp)
        data.loc['AID'] = new_AID
        newIndex = ['AID'] + [ind for ind in data.index if ind != 'AID']
        data = data.reindex(index=newIndex)
        # Reorder columns to match the column dendrogram.
        data = data[['Name', 'GWEIGHT'] + order_of_columns]
    if gtr_companion:
        # The GID column links each row to a GENE node in the .gtr file.
        new_GID = ['']
        if atr_companion:
            new_GID = ['AID', 'EWEIGHT']  # This is to make sure we fit the CDT format
        for element in range(len(order_of_rows)):
            temp = 'GENE' + str(element) + 'X'
            new_GID.append(temp)
        data.insert(loc=0, column='GID', value=new_GID)  # adding an extra column
        data.insert(loc=0, column=data.index.name, value=data.index)  # Making the index a column
        # Reorder rows to match the row dendrogram (header rows stay on top).
        temp = ['AID', 'EWEIGHT'] + order_of_rows
        data = data.reindex(temp)
        # Making the 'GID' the index -- for printing purposes
        data.index = data['GID']
        data.index.name = 'GID'
        data.drop(['GID'], axis=1, inplace=True)
    # Write the assembled table as tab-separated values.
    f = open(name, 'w')
    f.write(data.to_csv(sep='\t', index=True, header=True))
    f.close()
    # NOTE(review): this rounding happens after the file is written, so it has
    # no effect on the output -- likely a leftover from debugging.
    data = data.round(2)
    return
def make_atr(col_tree_dic, data, dist, clustering_method='average', file_name='test.atr'):
    """Write the column-cluster tree to an .atr file (companion of the .cdt).

    Each output line lists a node, its two children (ARRY/NODE ids), and the
    aggregated centroid distance between the children.

    :param col_tree_dic: node -> (child, child) mapping from ``make_tree``.
    :param data: the clustered matrix (columns are compared; axis=1).
    :param dist: distance/similarity function between cluster members.
    :param clustering_method: 'average' or 'complete'.
    :param file_name: output .atr filename.
    """
    n_nodes = len(col_tree_dic)
    # Pre-compute the child-to-child value for every internal node.
    node_distances = {}
    for node, children in col_tree_dic.items():
        node_distances[node] = centroid_distances(children[0], children[1], tree=col_tree_dic,
                                                  data=data, axis=1, distance=dist,
                                                  clustering_method=clustering_method)
    with open(file_name, 'w') as out:
        for node, children in col_tree_dic.items():
            fields = [translate_tree(node, n_nodes, 'atr'),
                      translate_tree(children[0], n_nodes, 'atr'),
                      translate_tree(children[1], n_nodes, 'atr'),
                      "{num:.{width}f}".format(num=node_distances[node], width=SIGNIFICANT_DIGITS)]
            out.write('\t'.join(fields) + '\n')
    return
def make_gtr(row_tree_dic, data, dist, clustering_method='average', file_name='test.gtr'):
    """Write the row-cluster tree to a .gtr file (companion of the .cdt).

    Each output line lists a node, its two children (GENE/NODE ids), and the
    aggregated centroid distance between the children.

    :param row_tree_dic: node -> (child, child) mapping from ``make_tree``.
    :param data: the clustered matrix (rows are compared; axis=0).
    :param dist: distance/similarity function between cluster members.
    :param clustering_method: 'average' or 'complete'.
    :param file_name: output .gtr filename.
    """
    n_nodes = len(row_tree_dic)
    # Pre-compute the child-to-child value for every internal node.
    node_distances = {}
    for node, children in row_tree_dic.items():
        node_distances[node] = centroid_distances(children[0], children[1], tree=row_tree_dic,
                                                  data=data, axis=0, distance=dist,
                                                  clustering_method=clustering_method)
    with open(file_name, 'w') as out:
        for node, children in row_tree_dic.items():
            fields = [translate_tree(node, n_nodes, 'gtr'),
                      translate_tree(children[0], n_nodes, 'gtr'),
                      translate_tree(children[1], n_nodes, 'gtr'),
                      "{num:.{width}f}".format(num=node_distances[node], width=SIGNIFICANT_DIGITS)]
            out.write('\t'.join(fields) + '\n')
    return
def translate_tree(what, length, g_or_a):
    """Translate a numeric node id into a CDT/ATR/GTR identifier string.

    Leaf ids (<= length) become 'ARRYiX' or 'GENEiX'; internal nodes become
    'NODEjX' with j offset by `length`.

    :param what: numeric node id.
    :param length: number of internal nodes; ids above this are NODEs.
    :param g_or_a: string containing 'a' for array (column) ids, else 'g'
        for gene (row) ids.
    :return: the identifier string, or [] if g_or_a matches neither mode.
    """
    if 'a' in g_or_a:
        leaf_prefix = 'ARRY'
    elif 'g' in g_or_a:
        leaf_prefix = 'GENE'
    else:
        # Preserve the original (odd) fallback: report and return an empty list.
        print('This function does not support g_or_a=', g_or_a)
        return []
    if what <= length:
        return leaf_prefix + str(what) + 'X'
    return 'NODE' + str(what - length) + 'X'
# def get_children_recursively(k, model, node_dict, leaf_count, n_samples, data, verbose=False, left=None, right=None):
# # print(k)
# i, j = model.children_[k]
#
# if k in node_dict:
# return node_dict[k]['children']
#
# if i < leaf_count:
# # print("i if")
# left = [i]
# else:
# # print("i else")
# # read the AgglomerativeClustering doc. to see why I select i-n_samples
# left, node_dict = get_children_recursively(i - n_samples, model, node_dict,
# leaf_count, n_samples, data, verbose, left, right)
#
# if j < leaf_count:
# # print("j if")
# right = [j]
# else:
# # print("j else")
# right, node_dict = get_children_recursively(j - n_samples, model, node_dict,
# leaf_count, n_samples, data, verbose, left, right)
#
# if verbose:
# print(k, i, j, left, right)
# temp = map(lambda ii: data[ii], left)
# left_pos = np.mean(list(temp), axis=0)
# temp = map(lambda ii: data[ii], right)
# right_pos = np.mean(list(temp), axis=0)
#
# # this assumes that agg_cluster used euclidean distances
# dist = metrics.pairwise_distances([left_pos, right_pos], metric='euclidean')[0, 1]
#
# all_children = [x for y in [left, right] for x in y]
# pos = np.mean(list(map(lambda ii: data[ii], all_children)), axis=0)
#
# # store the results to speed up any additional or recursive evaluations
# node_dict[k] = {'top_child': [i, j], 'children': all_children, 'pos': pos, 'dist': dist,
# 'node_i': k + n_samples}
# return all_children, node_dict
# def recursive_atr
def get_children(tree, leaves_are_self_children=False):
    """Map node ids to the list of leaf ids underneath them.

    Recursive expansion is delegated to ``list_children_single_node``.

    NOTE(review): `range(max(tree.keys()))` excludes the root node itself
    (the max key), so the returned dict has no entry for the root.
    `plot_dendrogram` compensates by appending a count for the final merge --
    changing the range here would break that caller; confirm before "fixing".

    :param tree: node -> (child, child) dict from ``make_tree``.
    :param leaves_are_self_children: if True, each leaf maps to [itself];
        otherwise leaves map to [].
    :return: dict of node id -> sorted list of descendant leaf ids.
    """
    # this is a recursive function
    expanded_tree = {}
    for node in range(max(tree.keys())):
        # Ids <= len(tree) are leaves under make_tree's numbering scheme.
        if node <= len(tree):
            if leaves_are_self_children:
                expanded_tree[node] = [node]
            else:
                expanded_tree[node] = []
        else:
            expanded_tree[node] = list_children_single_node(node, tree, leaves_are_self_children)
    return expanded_tree
def list_children_single_node(node, tree, leaves_are_self_children=False, only_leaves_are_children=True):
    """Recursively collect the descendants of `node` in `tree`.

    :param node: node id (treated as a leaf when <= len(tree)).
    :param tree: node -> (child, child) dict from ``make_tree``.
    :param leaves_are_self_children: if True, a leaf reports itself as its only
        child; otherwise a leaf has no children.
    :param only_leaves_are_children: if True, return only leaf descendants;
        otherwise include internal node ids as well.
    :return: sorted list of unique descendant ids.
    """
    if node <= len(tree):
        # Leaf node.
        if leaves_are_self_children:
            children = [node]
        else:
            children = []
    else:
        children = list(tree[node])
    # Check each child, and add their children to the list.
    # NOTE(review): `children` grows while being iterated. The recursive call
    # returns leaves only (ids <= len(tree)), which hit the `pass` branch, so
    # the loop terminates -- but the pattern is fragile; handle with care.
    for child in children:
        if child <= len(tree):
            pass
        else:
            children += list_children_single_node(child, tree, only_leaves_are_children=True)
    if only_leaves_are_children:
        # Internal node ids expanded above are filtered out here.
        return [i for i in sorted(np.unique(children)) if i <= len(tree)]
    else:
        return sorted(np.unique(children))
def centroid_distances(node_a, node_b, tree, data, axis=0, distance=mydist, clustering_method='average'):
    """Aggregate the pairwise value of `distance` over all leaf pairs under two nodes.

    :param node_a: first node id.
    :param node_b: second node id.
    :param tree: node -> (child, child) dict from ``make_tree``.
    :param data: matrix; rows are compared for axis=0, columns for axis=1.
    :param axis: 0 for rows, 1 for columns (the matrix is transposed).
    :param distance: pairwise function; callers pass entries from either
        str2dist (distances) or str2similarity (similarities).
    :param clustering_method: 'average' (mean over all pairs) or 'complete'.
    :return: aggregated scalar over all leaf pairs.
    """
    if axis == 0:
        pass
    elif axis == 1:
        data = np.transpose(data)
    else:
        exit("Variable 'data' does not have that many axises (╯°□°)╯︵ ┻━┻")
    children_of_a = list_children_single_node(node_a, tree=tree, leaves_are_self_children=True)
    children_of_b = list_children_single_node(node_b, tree=tree, leaves_are_self_children=True)
    distances_list = []
    if clustering_method == 'average':
        for pair in itertools.product(data[children_of_a], data[children_of_b]):
            distances_list.append(distance(pair[0], pair[1]))
        return np.average(distances_list)
    elif clustering_method == 'complete':
        # NOTE(review): np.min of a *similarity* corresponds to complete
        # linkage (the farthest pair); if `distance` is a true distance this
        # should be np.max instead -- confirm which callers pass what.
        for pair in itertools.product(data[children_of_a], data[children_of_b]):
            distances_list.append(distance(pair[0], pair[1]))
        return np.min(distances_list)
    else:
        exit("Ony 'average' and 'complete' clustering methods are accepted at the moment (>_<)")
def euclidian_similarity(x, y):
    """Convert the distance between x and y into a similarity in (0, 1]."""
    separation = mydist(x, y)
    # exp decay: identical vectors give 1, far-apart vectors approach 0.
    return 1 / (np.exp(separation))
def better_dendodist(children, distance, tree, data, axis, clustering_method='average'):
    """Centroid distance for every merge pair in `children`.

    :param children: iterable of (node_a, node_b) merge pairs (model.children_).
    :param distance: pairwise distance/similarity function.
    :param tree: node -> (child, child) dict from ``make_tree``.
    :param data: the clustered matrix.
    :param axis: 0 for rows, 1 for columns.
    :param clustering_method: 'average' or 'complete'.
    :return: list with one aggregated value per merge pair.
    """
    return [centroid_distances(node_a, node_b, tree, data, axis, distance=distance,
                               clustering_method=clustering_method)
            for node_a, node_b in children]
def HierarchicalClustering(pwd: "The current directory",
                           gct_name: "Gene expression data filename (.gct file) or Pandas DataFrame "
                                     "where rows are genes and columns are samples",
                           col_distance_metric: "The function to be used when comparing the distance/similarity of "
                                                "the columns in the gct_name dataset",
                           row_distance_metric: "The function to be used when comparing the distance/similarity of "
                                                "the rows in the gct_name dataset",
                           clustering_method: "Type of linkage to use" = 'average',
                           output_base_name: "Base name for output file" = 'HC_output',
                           row_normalization: "Whether to normalize each row (gene) in the data" = False,
                           col_normalization: "Whether to normalize each column (sample) in the data" = False,
                           row_centering: "How to center each row (gene) in the data" = 'Mean',
                           col_centering: "How to center each column (sample) in the data" = 'Mean',
                           output_distances: "Whether or not output the pair-wise distance matrix. "
                                             "If true, the distance between each column will be called, "
                                             "which can be very computationally intensive. "
                                             "If unsure, leave as False." = False,
                           custom_plot: "Plot the dendrograms by Genes, Samples, or Both" = 'Both',
                           clusters_to_highlight: "How many clusters to highlight in the dendrogram" = 2,
                           show: "Whether to show the plot at the end" = False):
    """
    This function performs hierarchical clustering to group samples (columns) with similar phenotypes
    and/or genes (rows) with similar expression profiles.

    Side effects: writes .atr/.gtr/.cdt (HierarchicalClusteringViewer-compatible), a .pdf heatmap,
    optionally a pairwise-distance .csv, and (in the 'Samples' branch) a .cls file.

    :param pwd: The current directory
    :param gct_name: Gene expression data filename (.gct file) or Pandas DataFrame where rows are genes and
                     columns are samples
    :param col_distance_metric: The function to be used when comparing the distance/similarity of
                                the columns in the gct_name dataset
    :param row_distance_metric: The function to be used when comparing the distance/similarity of
                                the rows in the gct_name dataset
    :param clustering_method: Type of linkage to use
    :param output_base_name: Base name for output file
    :param row_normalization: Whether to normalize each row (gene) in the data
    :param col_normalization: Whether to normalize each column (sample) in the data
    :param row_centering: How to center each row (gene) in the data
    :param col_centering: How to center each column (sample) in the data
    :param output_distances: Whether or not output the pair-wise distance matrix.
                             If true, the distance between each column will be called,
                             which can be very computationally intensive.
                             If unsure, leave as False
    :param custom_plot: Plot the dendrograms by Genes, Samples, or Both
    :param clusters_to_highlight: How many clusters to highlight in the dendrogram
    :param show: Whether to show the plot at the end
    :return: (col_model, row_model) -- the fitted sklearn AgglomerativeClustering models for columns
             and rows respectively; a model stays None when that dimension was not clustered.
    """
    # If one dimension is explicitly not clustered, only the other one can be plotted.
    if col_distance_metric == "No_column_clustering":
        custom_plot = 'Genes'
    if row_distance_metric == "No_row_clustering":
        custom_plot = 'Samples'
    # parse_data (defined elsewhere in this module) loads the GCT input and applies
    # the requested centering/normalization.
    og_data, og_data_df, data, data_df, col_labels, row_labels, og_full_gct, new_full_gct = \
        parse_data(gct_name, row_normalization, col_normalization, row_centering, col_centering)
    order_of_columns = list(data_df)
    order_of_rows = list(data_df.index)
    data_transpose = np.transpose(data)
    # Defaults used when a dimension is not clustered.
    atr_companion = False
    col_model = None
    col_tree = None
    gtr_companion = False
    row_model = None
    row_tree = None
    AID = None  # NOTE(review): AID/GID are never used below -- candidates for removal
    GID = None
    # --- Column (sample) clustering; produces the .atr companion file ---
    if col_distance_metric != 'No_column_clustering':
        atr_companion = True
        col_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,
                                            affinity=str2func[col_distance_metric])
        col_model.fit(data_transpose)
        col_tree = make_tree(col_model)
        order_of_columns = order_leaves(col_model, tree=col_tree, data=data_transpose,
                                        dist=str2similarity[col_distance_metric], labels=col_labels, reverse=True)
        path_to_atr = output_base_name + '.atr'
        make_atr(col_tree, file_name=path_to_atr, data=data,
                 dist=str2similarity[col_distance_metric], clustering_method=linkage_dic[clustering_method])
    # --- Row (gene) clustering; produces the .gtr companion file ---
    if row_distance_metric != 'No_row_clustering':
        gtr_companion = True
        row_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,
                                            affinity=str2func[row_distance_metric])
        row_model.fit(data)
        row_tree = make_tree(row_model)
        order_of_rows = order_leaves(row_model, tree=row_tree, data=data,
                                     dist=str2similarity[row_distance_metric], labels=row_labels)
        path_to_gtr = output_base_name + '.gtr'
        make_gtr(row_tree, data=data, file_name=output_base_name + '.gtr', dist=str2similarity[row_distance_metric])
    if output_distances:
        # TODO: check which col or row was selected, or both
        row_distance_matrix = str2affinity_func[row_distance_metric](data)
        dist_file = open(output_base_name + '_pairwise_distances.csv', 'w')
        dist_file.write('labels,')
        dist_file.write(",".join(col_model.labels_.astype(str)) + "\n")
        dist_file.write('samples,')
        dist_file.write(",".join(list(data_df)) + "\n")
        i = 0
        for row in row_distance_matrix:
            dist_file.write('distances row=' + str(i) + "," + ",".join(row.astype(str)) + "\n")
            i += 1
        # NOTE(review): dist_file is never closed -- consider a 'with' block
    # Write the .cdt file consumed by HierarchicalClusteringViewer.
    path_to_cdt = output_base_name + '.cdt'
    make_cdt(data=new_full_gct, name=path_to_cdt, atr_companion=atr_companion,
             gtr_companion=gtr_companion,
             order_of_columns=order_of_columns, order_of_rows=order_of_rows)
    if custom_plot == 'Samples':
        # Heatmap with the column (sample) dendrogram on top.
        plt.clf()
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(2, 1, height_ratios=[1, 5])
        gs.update(wspace=0.0, hspace=0.0)
        ax0 = plt.subplot(gs[0])  # Doing dendrogram first
        ax0.axis('off')
        col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,
                                          dist=str2similarity[col_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          title='no_title.png', orientation='top')
        col_order = [int(i) for i in col_order]
        named_col_order = [col_labels[i] for i in col_order]
        ax1 = plt.subplot(gs[1])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df[named_col_order], ax=ax1, cbar=False, cmap='bwr')
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The ATR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_atr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            # NOTE(review): plt.show() is intentionally disabled in this branch
            pass
        # Export the column cluster assignments as a .cls file.
        cls_list = col_model.labels_
        list2cls(np.array(list2intlist(cls_list)), name_of_out=output_base_name+'.cls', sep=' ')
    if custom_plot == 'Genes':
        # Heatmap with the row (gene) dendrogram on the right.
        plt.clf()
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])
        gs.update(wspace=0.0, hspace=0.0)
        ax0 = plt.subplot(gs[1])  # Doing dendrogram first
        ax0.axis('off')
        row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,
                                          dist=str2similarity[row_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          orientation='right', title='no_title.png')
        ax1 = plt.subplot(gs[0])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df.iloc[row_order], ax=ax1, cbar=False, cmap='bwr')
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The GTR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_gtr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            plt.show()
    if custom_plot == 'Both':
        # Heatmap with both dendrograms: columns on top, rows on the right.
        plt.clf()
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(2, 2, width_ratios=[5, 1], height_ratios=[1, 5])
        gs.update(wspace=0.0, hspace=0.0)
        # Doing TOP dendrogram first
        ax0 = plt.subplot(gs[0])
        ax0.axis('off')
        col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,
                                          dist=str2similarity[col_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          title='no_title.png', orientation='top')
        col_order = [int(i) for i in col_order]
        named_col_order = [col_labels[i] for i in col_order]
        # Doing RIGHT dendrogram
        ax3 = plt.subplot(gs[3])
        ax3.axis('off')
        row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,
                                          dist=str2similarity[row_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          orientation='right', title='no_title.png')
        # Plotting the heatmap now
        ax1 = plt.subplot(gs[2])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df[named_col_order].iloc[row_order], ax=ax1, cbar=False, cmap='bwr')
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The GTR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_gtr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            plt.show()
    return col_model, row_model
def hc_samples(
        input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples",
        clustering_type: "single or consensus -- Only single is supported at the moment",
        distance_metric: "the function to be used when comparing the distance/similarity of the columns in the "
                         "input_gene_expression dataset",
        file_basename: "the name to use when naming output files" = 'HC_out',
        clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None):
    """
    Perform hierarchical clustering to group samples with similar phenotypes.

    Thin convenience wrapper around HierarchicalClustering that clusters the
    columns (samples) only, with fixed average linkage and mean centering.

    :param input_gene_expression: str; gene expression data filename (.gct file)
                                  where rows are genes and columns are samples
    :param clustering_type: str; single or consensus (currently ignored, 'single' is always used)
    :param distance_metric: str; the function to be used when comparing the distance/similarity of the columns
                            in the input_gene_expression dataset
    :param file_basename: str; the name to use when naming output files
    :param clusters_to_highlight: int; how many clusters to highlight in the dendrogram.
                                  NOTE(review): the default (None) is forwarded to AgglomerativeClustering's
                                  n_clusters -- confirm callers always pass an int.
    :return: object; Sklearn's AgglomerativeClustering fitted model for the samples
    """
    # Fixed typo in the user-facing message ("Currenty" -> "Currently").
    print("Currently clustering_type is being ignored, only 'single' is supported.")
    print("Now we will start performing hierarchical clustering, this may take a little while.")
    # Delegate to the general-purpose driver, clustering columns only.
    col_model, _row_model = HierarchicalClustering(pwd='.',
                                                   gct_name=input_gene_expression,
                                                   col_distance_metric=distance_metric,
                                                   row_distance_metric='No_row_clustering',
                                                   clustering_method='average',
                                                   output_base_name=file_basename,
                                                   row_normalization=False,
                                                   col_normalization=False,
                                                   row_centering='Mean',
                                                   col_centering='Mean',
                                                   output_distances=False,
                                                   custom_plot='Samples',
                                                   clusters_to_highlight=clusters_to_highlight,
                                                   show=True)
    print("Done with Hierarchical Clustering!")
    return col_model
def hc_genes(
        input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples",
        clustering_type: "single or consensus -- Only single is supported at the moment",
        distance_metric: "the function to be used when comparing the distance/similarity of the rows in the "
                         "input_gene_expression dataset",
        file_basename: "the name to use when naming output files" = 'HC_out',
        clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None):
    """
    Perform hierarchical clustering to group genes with similar expression profile.

    Thin convenience wrapper around HierarchicalClustering that clusters the
    rows (genes) only, with fixed average linkage and mean centering.

    :param input_gene_expression: str; gene expression data filename (.gct file)
                                  where rows are genes and columns are samples
    :param clustering_type: str; single or consensus (currently ignored, 'single' is always used)
    :param distance_metric: str; the function to be used when comparing the distance/similarity of the rows
                            in the input_gene_expression dataset
    :param file_basename: str; the name to use when naming output files
    :param clusters_to_highlight: int; how many clusters to highlight in the dendrogram.
                                  NOTE(review): the default (None) is forwarded to AgglomerativeClustering's
                                  n_clusters -- confirm callers always pass an int.
    :return: object; Sklearn's AgglomerativeClustering fitted model for the genes
    """
    # Fixed typo in the user-facing message ("Currenty" -> "Currently").
    print("Currently clustering_type is being ignored, only 'single' is supported.")
    print("Now we will start performing hierarchical clustering, this may take a little while.")
    # Delegate to the general-purpose driver, clustering rows only.
    _col_model, row_model = HierarchicalClustering(pwd='.',
                                                   gct_name=input_gene_expression,
                                                   col_distance_metric='No_column_clustering',
                                                   row_distance_metric=distance_metric,
                                                   clustering_method='average',
                                                   output_base_name=file_basename,
                                                   row_normalization=False,
                                                   col_normalization=False,
                                                   row_centering='Mean',
                                                   col_centering='Mean',
                                                   output_distances=False,
                                                   custom_plot='Genes',
                                                   clusters_to_highlight=clusters_to_highlight,
                                                   show=True)
    print("Done with Hierarchical Clustering!")
    return row_model
def normalize_dataframe(df, log_normalize=None,
                        row_centering='Mean', row_normalization=True,
                        col_centering='Mean', col_normalization=True):
    """
    Normalize the values of a DataFrame. Order of operations is:

        1- Log-normalize (currently unsupported: a warning is printed and the step is skipped)
        2- Row (gene) center
        3- Row (gene) L2-normalize
        4- Column (sample) center
        5- Column (sample) L2-normalize

    :param df: (pandas DataFrame) a DataFrame to be normalized
    :param log_normalize: (float or None) base of the logarithm to use; None disables log-normalization.
                          NOTE: any non-None value currently only prints a warning.
    :param row_centering: ('Mean', 'Median' or 'No') subtract the row mean/median from every element of each row
    :param row_normalization: (bool) divide each row by its Euclidean (L2) norm
    :param col_centering: ('Mean', 'Median' or 'No') subtract the column mean/median from each column
    :param col_normalization: (bool) divide each column by its Euclidean (L2) norm
    :return: (pandas DataFrame) normalized copy of df (df itself when no normalization was requested)
    """
    # Fast path: nothing to do, hand the caller back the original frame unchanged.
    if (log_normalize is None) \
            and (row_centering == 'No') and (col_centering == 'No') \
            and (row_normalization is False) and (col_normalization is False):
        print("No normalization has been requested ಠ_ಠ¯")
        return df
    # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the supported replacement.
    data = df.to_numpy()
    # Log Normalizing
    if log_normalize is not None:
        print("I'm sorry, log-normalization is not supported at the moment (u_u)")
    # Row Centering
    if row_centering != 'No':
        if row_centering == 'Mean':
            row_means = np.mean(data, axis=1)
            data = data - row_means.reshape((data.shape[0], 1))
        elif row_centering == 'Median':
            row_medians = np.median(data, axis=1)
            data = data - row_medians.reshape((data.shape[0], 1))
        else:
            print("row_centering has an unexpected value:", row_centering)
    # Row normalizing (divide each row by its L2 norm)
    if row_normalization:
        row_norm = np.sum(data * data, axis=1)
        data = data / np.sqrt(row_norm.reshape((data.shape[0], 1)))
    # Column Centering
    if col_centering != 'No':
        if col_centering == 'Mean':
            data = data - np.mean(data, axis=0)
        elif col_centering == 'Median':
            data = data - np.median(data, axis=0)
        else:
            print("col_centering has an unexpected value: ", col_centering)
    # Column normalizing (divide each column by its L2 norm)
    if col_normalization:
        col_norm = np.sum(data * data, axis=0)
        data = data / np.sqrt(col_norm)
    return pd.DataFrame(data=data, index=df.index, columns=list(df))
def display_heatmap(data,
                    name='heatmap',
                    log_normalize=None,
                    row_centering: "How to center each row (gene) in the data" = 'No',
                    row_normalization: "Whether to normalize each row (gene) in the data" = True,
                    col_centering: "How to center each column (sample) in the data" = 'No',
                    col_normalization: "Whether to normalize each column (sample) in the data" = False,
                    mostrar=False):
    """
    Normalize a gene-expression dataset and save it as a heatmap PDF (optionally showing it).

    :param data: (pandas DataFrame, file path, or URL) either a DataFrame, or the location of a
                 GCT file (tab-separated, two header rows, 'Name'/'Description' columns)
    :param name: (str) output filename; '.pdf' is appended when missing
    :param log_normalize: (float or None) forwarded to normalize_dataframe
    :param row_centering: ('Mean', 'Median' or 'No') forwarded to normalize_dataframe
    :param row_normalization: (bool) forwarded to normalize_dataframe
    :param col_centering: ('Mean', 'Median' or 'No') forwarded to normalize_dataframe
    :param col_normalization: (bool) forwarded to normalize_dataframe
    :param mostrar: (bool) whether to also display the plot interactively
    """
    if isinstance(data, pd.DataFrame):
        data_to_plot = data.copy()
    elif os.path.isfile(data):
        data_to_plot = pd.read_table(data, skiprows=2, sep='\t')
        data_to_plot.set_index('Name', inplace=True)
        data_to_plot.drop('Description', axis=1, inplace=True)
    else:
        # Last resort: assume 'data' is a URL pointing to a GCT file.
        # NOTE(review): this relies on urllib.error being importable in this module --
        # verify the import exists at module level.
        try:
            data_to_plot = pd.read_table(data, skiprows=2, sep='\t')
        except urllib.error.HTTPError:
            print("I don't know what the variable 'data' contains.")
            print('data=')
            print(data)
            exit("If this is a url it may not be accessible.\n"
                 "(╯°□°)╯︵ ┻━┻")
        data_to_plot.set_index('Name', inplace=True)
        data_to_plot.drop('Description', axis=1, inplace=True)
    data_to_plot = normalize_dataframe(data_to_plot, log_normalize=log_normalize,
                                       row_centering=row_centering, row_normalization=row_normalization,
                                       col_centering=col_centering, col_normalization=col_normalization)
    plt.clf()
    # Scale the figure height with the number of rows so y tick labels do not overlap
    # (see https://stackoverflow.com/questions/35127920 and https://matplotlib.org/users/customizing.html).
    fontsize_pt = plt.rcParams['ytick.labelsize']
    dpi = 72.27
    # Matrix height in points and inches. Using DataFrame.shape directly instead of the
    # removed DataFrame.as_matrix() (gone since pandas 1.0).
    matrix_height_pt = fontsize_pt * data_to_plot.shape[0]
    matrix_height_in = (matrix_height_pt / dpi) * 1.2
    # Compute the required figure height.
    top_margin = 0.01  # in percentage of the figure height
    bottom_margin = 0.01  # in percentage of the figure height
    figure_height = matrix_height_in / (1 - top_margin - bottom_margin)
    # Build the figure instance with the desired height.
    fig, ax = plt.subplots(
        figsize=(6, figure_height),
        gridspec_kw=dict(top=1 - top_margin, bottom=bottom_margin))
    sns.heatmap(data_to_plot, cmap='bwr', yticklabels=True, square=True,
                cbar_kws={'use_gridspec': False,
                          'location': "right",
                          'shrink': 0.5,
                          'label': ''}
                )
    if not name.endswith('.pdf'):
        name = name + '.pdf'
    plt.savefig(name, dpi=dpi, bbox_inches='tight')
    print(name, "has been created!")
    if mostrar:
        plt.show()
    print("The PDF of this heatmap can be downloaded here:")
    display(HTML('<a href="' + name + '" target="_blank">PDF of the heatmap</a>'))
    return
| 41.66015 | 132 | 0.623869 | import sys
import numpy as np
from statistics import mode
from sklearn.metrics import pairwise
from sklearn import metrics
from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import itertools
from sklearn.cluster import AgglomerativeClustering
import scipy
import itertools  # NOTE(review): duplicate import (already imported above) -- harmless but redundant
from collections import defaultdict
from .elemental import *
from .information import *
import os
import sys
# Directory containing the running script (sys.argv[0]).
tasklib_path = os.path.dirname(os.path.realpath(sys.argv[0]))
from IPython.core.display import display, HTML
import scipy  # NOTE(review): duplicate import
import seaborn as sns  # NOTE(review): duplicate import
from matplotlib import pyplot as plt  # NOTE(review): duplicate of matplotlib.pyplot above
from matplotlib import gridspec
from sklearn.cluster import AgglomerativeClustering  # NOTE(review): duplicate import
sns.set_style("white")
import matplotlib as mpl
# Global plot typography defaults for every figure produced by this module.
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['axes.titlesize'] = 24
mpl.rcParams['axes.labelsize'] = 20
# NOTE(review): usage not visible in this chunk -- presumably a rounding precision for outputs.
SIGNIFICANT_DIGITS = 7
# Maps every accepted user spelling of a COLUMN distance metric (long GenePattern-style
# labels, numeric codes, and short internal names) onto its canonical internal name.
input_col_distance_dict = {
    "No column clustering": "No_column_clustering",
    "Uncentered correlation": "uncentered_pearson",
    "Pearson correlation": "pearson",
    "Uncentered correlation, absolute value": "absolute_uncentered_pearson",
    "Pearson correlation, absolute value": "absolute_pearson",
    "Spearman's rank correlation": "spearman",
    "Kendall's tau": "kendall",
    "Euclidean distance": "euclidean",
    "City-block distance": "manhattan",
    "No_column_clustering": "No_column_clustering",
    # Numeric codes (legacy CLI values):
    "0": "No_column_clustering",
    "1": "uncentered_pearson",
    "2": "pearson",
    "3": "absolute_uncentered_pearson",
    "4": "absolute_pearson",
    "5": "spearman",
    "6": "kendall",
    "7": "euclidean",
    "8": "manhattan",
    "9": "information_coefficient",
    # Canonical names map to themselves so lookups are idempotent:
    "no_col": "No_column_clustering",
    "uncentered_pearson": "uncentered_pearson",
    "pearson": "pearson",
    "absolute_uncentered_pearson": "absolute_uncentered_pearson",
    "absolute_pearson": "absolute_pearson",
    "spearman": "spearman",
    "kendall": "kendall",
    "euclidean": "euclidean",
    "manhattan": "manhattan",
    "Cosine": "cosine",
    "cosine": "cosine",
    "ic": "information_coefficient",
    "information_coefficient": "information_coefficient",
    "Information Coefficient": "information_coefficient",
}
# Maps every accepted user spelling of a ROW distance metric (long GenePattern-style
# labels, numeric codes, and short internal names) onto its canonical internal name.
input_row_distance_dict = {
    "No row clustering": "No_row_clustering",
    "Uncentered correlation": "uncentered_pearson",
    "Pearson correlation": "pearson",
    "Uncentered correlation, absolute value": "absolute_uncentered_pearson",
    "Pearson correlation, absolute value": "absolute_pearson",
    "Spearman's rank correlation": "spearman",
    "Kendall's tau": "kendall",
    "Euclidean distance": "euclidean",
    "City-block distance": "manhattan",
    "No_row_clustering": "No_row_clustering",
    # Numeric codes (legacy CLI values):
    "0": "No_row_clustering",
    "1": "uncentered_pearson",
    "2": "pearson",
    "3": "absolute_uncentered_pearson",
    "4": "absolute_pearson",
    "5": "spearman",
    "6": "kendall",
    "7": "euclidean",
    "8": "manhattan",
    "9": "information_coefficient",
    # Canonical names map to themselves so lookups are idempotent:
    "no_row": "No_row_clustering",
    "uncentered_pearson": "uncentered_pearson",
    "pearson": "pearson",
    "absolute_uncentered_pearson": "absolute_uncentered_pearson",
    "absolute_pearson": "absolute_pearson",
    "spearman": "spearman",
    "kendall": "kendall",
    "euclidean": "euclidean",
    "manhattan": "manhattan",
    "Cosine": "cosine",
    "cosine": "cosine",
    "ic": "information_coefficient",
    "information_coefficient": "information_coefficient",
    "Information Coefficient": "information_coefficient",
}
# Maps user spellings of the linkage method onto scipy/sklearn linkage names.
input_clustering_method = {
    'Pairwise complete-linkage': 'complete',
    'Pairwise average-linkage': 'average',
    'Pairwise ward-linkage': 'ward',
    'm': 'complete',
    'a': 'average',
}
# Maps user spellings of the row-centering option onto the internal value
# ('Mean', 'Median', or None for no centering).
input_row_centering = {
    'No': None,
    'Subtract the mean from each row': 'Mean',
    'Subtract the median from each row': 'Median',
    'None': None,
    'Median': 'Median',
    'Mean': 'Mean',
}
# Maps Yes/No-style strings onto the boolean row-normalization flag.
input_row_normalize = {
    'No': False,
    'Yes': True,
    'False': False,
    'True': True,
}
# Maps user spellings of the column-centering option onto the internal value.
input_col_centering = {
    'No': None,
    'Subtract the mean from each column': 'Mean',
    'Subtract the median from each column': 'Median',
    'None': None,
    'Median': 'Median',
    'Mean': 'Mean',
}
# Maps Yes/No-style strings onto the boolean column-normalization flag.
input_col_normalize = {
    'No': False,
    'Yes': True,
    'False': False,
    'True': True,
}
def parse_inputs(args=sys.argv):
    """Parse the positional command-line arguments of the clustering module.

    Expected argument order (only the GCT file name is required):
        args[1]  gct_name             args[6]   output_base_name
        args[2]  col_distance_metric  args[7]   row_normalization
        args[3]  output_distances     args[8]   col_normalization
        args[4]  row_distance_metric  args[9]   row_centering
        args[5]  clustering_method    args[10]  col_centering

    Returns the 10-tuple
        (gct_name, col_distance_metric, output_distances, row_distance_metric,
         clustering_method, output_base_name, row_normalization,
         col_normalization, row_centering, col_centering)
    where every supplied value is translated to its canonical internal form via
    the input_* dictionaries and every absent value keeps its default.

    Exits via sys.exit() when no GCT file is given or when more than ten
    parameters are passed.

    Fixes over the original 10-way copy-pasted version:
      * the 3-argument branch printed the translated column metric but
        returned the raw user string (the translation was never assigned);
      * the 10/11-argument branches tested and overwrote ``col_normalization``
        where ``row_centering`` was clearly intended (dead code anyway, since
        input_row_centering already maps 'None'/'none' to None).
    """
    arg_n = len(args)
    # The original reported "too many inputs" for an empty argv; "not enough"
    # is the accurate message for anything shorter than [prog, gct_name].
    if arg_n < 2:
        sys.exit("Not enough parameters files were provided. This module needs a GCT file to work.")
    if arg_n > 11:
        sys.exit("Too many inputs. This module needs only a GCT file to work, "
                 "plus an optional input choosing between Pearson Correlation or Information Coefficient.")

    gct_name = args[1]

    # Defaults for every optional parameter; overridden below when supplied.
    col_distance_metric = 'euclidean'
    output_distances = False
    row_distance_metric = 'No_row_clustering'
    clustering_method = 'Pairwise average-linkage'
    output_base_name = 'HC_out'
    row_normalization = False
    col_normalization = False
    row_centering = None
    col_centering = None

    if arg_n > 2:
        col_distance_metric = input_col_distance_dict[args[2]]
    if arg_n > 3:
        # Any spelling of "false" disables writing the distance matrix.
        output_distances = args[3] not in ('False', 'F', 'false', 'f')
    if arg_n > 4:
        row_distance_metric = input_row_distance_dict[args[4]]
    if arg_n > 5:
        clustering_method = input_clustering_method[args[5]]
        if clustering_method not in linkage_dic:
            exit("Clustering method chosen not supported. This should not have happened.")
        # NOTE(review): comparing a *distance metric* against 'average' looks
        # suspect (carried over from the original); confirm the intended guard.
        if (linkage_dic[clustering_method] == 'ward') and (col_distance_metric != 'average'):
            exit("When choosing 'Pairwise ward-linkage' the distance metric *must* be 'average' ")
    if arg_n > 6:
        output_base_name = args[6]
    if arg_n > 7:
        row_normalization = input_row_normalize[args[7]]
    if arg_n > 8:
        col_normalization = input_col_normalize[args[8]]
    if arg_n > 9:
        row_centering = input_row_centering[args[9]]
    if arg_n > 10:
        col_centering = input_col_centering[args[10]]

    # Report the effective settings, annotating defaulted values exactly as
    # the original branches did.
    print("Using:")
    print("\tgct_name =", gct_name)
    if arg_n > 2:
        print("\tcol_distance_metric =", col_distance_metric)
    else:
        print("\tcol_distance_metric = euclidean (default value)")
    parameter_report = (
        ('output_distances', output_distances, 4, "(default: not computing it and creating a file)"),
        ('row_distance_metric', row_distance_metric, 5, "(default: No row clustering)"),
        ('clustering_method', clustering_method, 6, "(default: Pairwise average-linkage)"),
        ('output_base_name', output_base_name, 7, "(default: HC_out)"),
        ('row_normalization', row_normalization, 8, "(default: False)"),
        ('col_normalization', col_normalization, 9, "(default: False)"),
        ('row_centering', row_centering, 10, "(default: None)"),
        ('col_centering', col_centering, 11, "(default: None)"),
    )
    for param_name, value, min_arg_n, default_note in parameter_report:
        if arg_n >= min_arg_n:
            print("\t{} =".format(param_name), value)
        else:
            print("\t{} =".format(param_name), value, default_note)
    print(args)

    return gct_name, col_distance_metric, output_distances, row_distance_metric, clustering_method, \
        output_base_name, row_normalization, col_normalization, row_centering, col_centering
def plot_dendrogram(model, data, tree, axis, dist=mydist, clustering_method='average',
                    title='no_title.png', color_threshold=None, orientation='top', **kwargs):
    """Build a scipy-style linkage matrix from a fitted agglomerative model and
    draw its dendrogram.

    :param model: fitted agglomerative clustering object; only ``.children_``
        is read.
    :param data: data matrix the model was fitted on (passed to
        better_dendodist).
    :param tree: {node_id: (child_a, child_b)} mapping, as built by make_tree().
    :param axis: axis argument forwarded to better_dendodist.
    :param dist: similarity function used to score each merge.
    :param title: figure file name; the actual savefig call is currently
        commented out, so this is unused at runtime.
    :param color_threshold: desired number of colored clusters; translated
        below into a height threshold for scipy's dendrogram().
    :param orientation: scipy dendrogram orientation ('top', 'left', ...).
    :return: (order_of_columns, linkage_matrix) — the leaf order reported by
        scipy ('ivl') and the linkage matrix used to draw it.
    """
    children = model.children_
    # Similarity of the two merged children at every agglomeration step.
    og_distances = better_dendodist(children, dist, tree, data, axis=axis, clustering_method=clustering_method)
    if dist in [custom_euclidean_sim, absolute_uncentered_pearson_corr, absolute_pearson_corr]:
        # These similarities are already nonnegative [0,inf) or [0,1]
        pass
    else:  # all the correlation similarities lie in [-1, 1]: shift to [0, 2]
        og_distances = [temp + 1 for temp in og_distances]
    # Now that all similarities are nonnegative, we turn them into a distance for plotting purposes
    og_distances = [1 / temp for temp in og_distances]
    # Cumulative sum makes the merge heights monotonically increasing, which
    # the scipy linkage format requires.
    distance = np.cumsum(og_distances)
    # Observation count per internal node, in merge order, plus the root.
    list_of_children = list(get_children(tree, leaves_are_self_children=False).values())
    no_of_observations = [len(i) for i in list_of_children if i]
    no_of_observations.append(len(no_of_observations) + 1)
    if all(value == 0 for value in distance):
        # If all distances are zero, then use uniform distance
        distance = np.arange(len(distance))
    # Create linkage matrix and then plot the dendrogram
    linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)
    # find the height at which to cut the dendrogram
    if color_threshold is not None:
        if color_threshold == 1:
            color_threshold = 2
        if color_threshold > (len(linkage_matrix) + 1):
            color_threshold = (len(linkage_matrix) + 1)
        # Cut just below the (color_threshold-1)-th merge from the top so that
        # color_threshold clusters receive distinct colors.
        color_threshold = linkage_matrix[-(color_threshold - 1)][2] - np.finfo(float).eps
    R = dendrogram(linkage_matrix, color_threshold=color_threshold, orientation=orientation, **kwargs)
    # 'ivl' holds the leaf labels in left-to-right plotting order.
    order_of_columns = R['ivl']
    return order_of_columns, linkage_matrix
def get_clusters(tree):
    """Placeholder for cluster extraction from a tree; not implemented yet."""
    return None
def get_cluster_classes(den, label='ivl'):
    """Debug helper: dump the fields of a scipy dendrogram result dict.

    Based on http://www.nxn.se/valent/extract-cluster-elements-by-color-in-python
    The color-based cluster extraction is currently disabled, so the returned
    mappings are always empty.

    :param den: dict returned by scipy.cluster.hierarchy.dendrogram().
    :param label: unused while the extraction code is disabled.
    :return: (clusters2idxs, idxs2clusters) — both empty.
    """
    clusters2idxs = defaultdict(list)
    idxs2clusters = {}
    # Summarize every field scipy's dendrogram() returns.
    print(den.keys())
    for field in ('icoord', 'dcoord', 'ivl'):
        print(len(den[field]))
    print(len(den['leaves']))
    print(den['leaves'])
    print(len(den['color_list']))
    print(den['color_list'])
    return clusters2idxs, idxs2clusters
def order_leaves(model, data, tree, labels, axis=0, dist=mydist, reverse=False):
    """Return the dendrogram leaf labels in plotting order.

    Adapted from https://stackoverflow.com/questions/12572436/calculate-ordering-of-dendrogram-leaves
    Only ``model.children_`` and ``labels`` influence the result; the other
    parameters are kept for interface compatibility.
    """
    merges = np.column_stack([model.children_]).astype(float)
    n_leaves = len(merges) + 1
    # Expand each internal node into the flat list of leaves beneath it,
    # walking the merges bottom-up.
    membership = dict()
    for step in range(len(merges)):
        left, right = int(merges[step][0]), int(merges[step][1])
        left_leaves = [left] if left < n_leaves else membership.pop(left)
        right_leaves = [right] if right < n_leaves else membership.pop(right)
        membership[n_leaves + step] = left_leaves + right_leaves
    # The root node (id == 2 * number_of_merges) holds the final leaf order.
    leaf_order = membership[2 * len(merges)]
    if reverse:
        leaf_order = list(reversed(leaf_order))
    return [labels[i] for i in leaf_order]
def two_plot_two_dendrogram(model, dist=mydist, **kwargs):
    """Draw a left-oriented dendrogram for the row clustering and return the
    leaf order, reversed to read top-to-bottom.

    Modified from https://github.com/scikit-learn/scikit-learn/pull/3464/files
    """
    # Children of hierarchical clustering: one (a, b) pair per merge.
    merges = model.children_
    # Per-merge distances; fall back to uniform spacing when degenerate.
    merge_distances = dendodist(merges, dist)
    if all(value == 0 for value in merge_distances):
        merge_distances = np.arange(len(merge_distances))
    # Observation counts per merge, as the scipy linkage format requires.
    observation_counts = np.arange(2, merges.shape[0] + 2)
    linkage_matrix = np.column_stack([merges, merge_distances, observation_counts]).astype(float)
    # Single-color dendrogram on the left side of the figure.
    R = dendrogram(linkage_matrix, color_threshold=0, orientation='left', **kwargs)
    row_order = R['ivl']
    plt.gca().get_xaxis().set_visible(False)
    return list(reversed(row_order))
def my_affinity_generic(M, metric):
    """Dense affinity matrix: entry [i][j] holds metric(M[j], M[i])."""
    rows = []
    for second in M:
        rows.append(np.array([metric(first, second) for first in M]))
    return np.array(rows)
def _pairwise_matrix(metric, M):
    """Square matrix of metric(a, b) over all ordered pairs of rows of M."""
    return np.array([[metric(a, b) for a in M] for b in M])


def my_affinity_i(M):
    return _pairwise_matrix(information_coefficient_dist, M)


def my_affinity_ai(M):
    return _pairwise_matrix(absolute_information_coefficient_dist, M)


def my_affinity_p(M):
    return _pairwise_matrix(custom_pearson_dist, M)


def my_affinity_s(M):
    return _pairwise_matrix(custom_spearman_dist, M)


def my_affinity_k(M):
    return _pairwise_matrix(custom_kendall_tau_dist, M)


def my_affinity_ap(M):
    return _pairwise_matrix(absolute_pearson_dist, M)


def my_affinity_u(M):
    return _pairwise_matrix(uncentered_pearson_dist, M)


def my_affinity_au(M):
    return _pairwise_matrix(absolute_uncentered_pearson_dist, M)


def my_affinity_l1(M):
    return _pairwise_matrix(custom_manhattan_dist, M)


def my_affinity_l2(M):
    return _pairwise_matrix(custom_euclidean_dist, M)


def my_affinity_m(M):
    return _pairwise_matrix(custom_manhattan_dist, M)


def my_affinity_c(M):
    return _pairwise_matrix(custom_cosine_dist, M)


def my_affinity_e(M):
    return _pairwise_matrix(custom_euclidean_dist, M)
def count_diff(x):
    """Count how many entries of x differ from its first element."""
    reference = x[0]
    return sum(1 for value in x if value != reference)
def count_mislabels(labels, true_labels):
    """Count mislabeled samples assuming exactly two true clusters (0 and 1).

    The larger true cluster is expected to carry its modal label and the
    smaller cluster the opposite label; every deviation counts as a mislabel.
    """
    group_zero = labels[true_labels == 0]
    group_one = labels[true_labels == 1]
    if len(group_zero) <= len(group_one):
        shorter, longer = group_zero, group_one
    else:
        shorter, longer = group_one, group_zero
    # The label the longer cluster should carry ...
    expected_long = mode(longer)
    # ... and the opposite label for the shorter cluster.
    expected_short = 1 if expected_long == 0 else 0
    return np.count_nonzero(longer != expected_long) + np.count_nonzero(shorter != expected_short)
def plot_heatmap(df, col_order, row_order, top=5, title_text='differentially expressed genes per phenotype'):
    """Save a heatmap of the top and bottom *top* rows of *df* to heatmap.png.

    :param df: DataFrame of expression values (genes x samples).
    :param col_order: column names in the order they should be displayed;
        must match df's columns exactly.
    :param row_order: index values in display order; must match df's rows.
    :param top: number of rows taken from each end of the reordered frame.
    :param title_text: suffix for the plot title.

    Bug fix: the original guards wrapped the two lengths in a tuple —
    ``not (len(a), len(b))`` — which is always False because a non-empty
    tuple is truthy, so mismatched orderings were never caught. The lengths
    are now compared explicitly.
    """
    if len(col_order) != len(list(df)):
        exit("Number of columns in dataframe do not match the columns provided for ordering.")
    if len(row_order) != len(df):
        exit("Number of rows in dataframe do not match the columns provided for ordering.")
    # Reorder to match the dendrogram leaf order.
    df = df[col_order]
    df = df.reindex(row_order)
    plt.clf()
    # np.r_ selects the first `top` and last `top` rows in one index.
    sns.heatmap(df.iloc[np.r_[0:top, -top:0], :], cmap='viridis')
    plt.yticks(rotation=0)
    plt.xticks(rotation=90)
    plt.title('Top {} {}'.format(top, title_text))
    plt.ylabel('Genes')
    plt.xlabel('Sample')
    plt.savefig('heatmap.png', dpi=300, bbox_inches="tight")
def parse_data(gct_name, row_normalization=False, col_normalization=False, row_centering=None, col_centering=None):
    """Load a GCT file (or accept an already-loaded DataFrame) and normalize it.

    :param gct_name: path/URL of a tab-separated GCT file (the two GCT header
        lines are skipped), or a pandas DataFrame used as-is.
    :param row_normalization: normalize each row when True.
    :param col_normalization: normalize each column when True.
    :param row_centering: None, 'Mean' or 'Median' row centering.
    :param col_centering: None, 'Mean' or 'Median' column centering.
    :return: (og_data, data_df, data, new_data_df, plot_labels, row_labels,
              og_full_gct, new_full_gct) — raw matrix, raw DataFrame,
              normalized matrix, normalized DataFrame, sample labels, gene
              labels, and the full GCT frames before/after normalization.

    Fixes over the original:
      * ``data_df.index.name is 'Name'`` compared identity with a string
        literal (implementation-dependent, SyntaxWarning on modern Python);
        now uses ``==``;
      * ``DataFrame.as_matrix()`` (removed in pandas 1.0) replaced with the
        equivalent ``.values``.
    """
    try:
        data_df = pd.read_csv(gct_name, sep='\t', skiprows=2)
    except ValueError:
        # Anything read_csv cannot open (e.g. an in-memory DataFrame) is
        # used directly.
        data_df = gct_name

    if data_df.index.name == 'Name':
        data_df['Name'] = data_df.index
    else:
        if 'Name' not in list(data_df):
            # Treat the first column as the gene names.
            data_df['Name'] = data_df.iloc[:, 0]
            data_df.drop(data_df.columns[0], axis=1, inplace=True)
        if 'Description' not in list(data_df):
            data_df['Description'] = data_df['Name']
    data_df.set_index(data_df['Name'], inplace=True)
    og_full_gct = data_df.copy()
    og_full_gct.drop(['Name'], axis=1, inplace=True)
    data_df.drop(['Name', 'Description'], axis=1, inplace=True)
    plot_labels = list(og_full_gct.drop(['Description'], axis=1, inplace=False))
    data = data_df.values
    row_labels = data_df.index.values
    og_data = data.copy()

    # All centering/normalization is delegated to normalize_dataframe.
    data = normalize_dataframe(data_df, log_normalize=None,
                               row_centering=row_centering, row_normalization=row_normalization,
                               col_centering=col_centering, col_normalization=col_normalization).values
    new_data_df = pd.DataFrame(data=data, index=data_df.index, columns=list(data_df))
    new_full_gct = new_data_df.copy()
    new_full_gct.insert(0, column='Description', value=og_full_gct['Description'])
    return og_data, data_df, data, new_data_df, plot_labels, row_labels, og_full_gct, new_full_gct
# ---------------------------------------------------------------------------
# Dispatch tables keyed by canonical metric / linkage names.
# str2func mixes callables with plain strings: string values are metric names
# that sklearn's AgglomerativeClustering accepts natively, callables build the
# affinity matrix by hand.
str2func = {
    'custom_euclidean': my_affinity_e,
    'uncentered_pearson': my_affinity_u,
    'absolute_uncentered_pearson': my_affinity_au,
    'information_coefficient': my_affinity_i,
    'pearson': my_affinity_p,
    'spearman': my_affinity_s,
    'kendall': my_affinity_k,
    'absolute_pearson': my_affinity_ap,
    'l1': 'l1',
    'l2': 'l2',
    'manhattan': 'manhattan',
    'cosine': 'cosine',
    'euclidean': 'euclidean',
}
# Like str2func, but every value is a callable affinity-matrix builder.
str2affinity_func = {
    'custom_euclidean': my_affinity_e,
    'uncentered_pearson': my_affinity_u,
    'absolute_uncentered_pearson': my_affinity_au,
    'information_coefficient': my_affinity_i,
    'pearson': my_affinity_p,
    'spearman': my_affinity_s,
    'kendall': my_affinity_k,
    'absolute_pearson': my_affinity_ap,
    'l1': my_affinity_l1,
    'l2': my_affinity_l2,
    'manhattan': my_affinity_m,
    'cosine': my_affinity_c,
    'euclidean': my_affinity_e,
}
# Pairwise *distance* functions (larger = farther apart).
str2dist = {
    'custom_euclidean': custom_euclidean_dist,
    'uncentered_pearson': uncentered_pearson_dist,
    'absolute_uncentered_pearson': absolute_uncentered_pearson_dist,
    'information_coefficient': information_coefficient_dist,
    'pearson': custom_pearson_dist,
    'spearman': custom_spearman_dist,
    'kendall': custom_kendall_tau_dist,
    'absolute_pearson': absolute_pearson_dist,
    'l1': custom_manhattan_dist,
    'l2': custom_euclidean_dist,
    'manhattan': custom_manhattan_dist,
    'cosine': custom_cosine_dist,
    'euclidean': custom_euclidean_dist,
}
# Pairwise *similarity* functions (larger = more alike); note 'euclidean'
# maps to a similarity here, not to the raw distance.
str2similarity = {
    'custom_euclidean': custom_euclidean_sim,
    'uncentered_pearson': uncentered_pearson_corr,
    'absolute_uncentered_pearson': absolute_uncentered_pearson_corr,
    'information_coefficient': information_coefficient,
    'pearson': custom_pearson_corr,
    'spearman': custom_spearman_corr,
    'kendall': custom_kendall_tau_corr,
    'absolute_pearson': absolute_pearson_corr,
    'l1': custom_manhattan_sim,
    'l2': custom_euclidean_sim,
    'manhattan': custom_manhattan_sim,
    'cosine': custom_cosine_sim,
    'euclidean': custom_euclidean_sim,
}
# Accepted spellings of the linkage method (GUI label or canonical token).
linkage_dic = {
    'Pairwise average-linkage': 'average',
    'Pairwise complete-linkage': 'complete',
    'Pairwise ward-linkage': 'ward',
    'average': 'average',
    'complete': 'complete',
    'ward': 'ward',
}
def make_tree(model, data=None):
    """Return {node_id: (child_a, child_b)} for a fitted agglomerative model.

    Internal node ids start at ``model.n_leaves_``, matching the convention
    that ids below n_leaves_ denote original observations.
    The ``data`` parameter is unused and kept for interface compatibility.
    """
    first_internal_id = model.n_leaves_
    return {node_id: pair
            for node_id, pair in enumerate(model.children_, start=first_internal_id)}
def make_cdt(data, order_of_columns, order_of_rows, name='test.cdt', atr_companion=True, gtr_companion=False):
    """Write *data* out as a Clustered Data Table (.cdt) file.

    Mutates *data* in place (renames 'Description' to 'Name', adds the
    GWEIGHT column and the EWEIGHT/AID rows) before writing.

    :param data: DataFrame of expression values with a 'Description' column.
    :param order_of_columns: sample names in dendrogram leaf order.
    :param order_of_rows: gene ids in dendrogram leaf order (used only when
        gtr_companion is True).
    :param name: output file path.
    :param atr_companion: add the AID row linking columns to the .atr file.
    :param gtr_companion: add the GID column linking rows to the .gtr file.
    """
    # TODO: if order_of_columns == None, then do arange(len(list(data)))
    # TODO: if order_of_rows == None, then do arange(len(list(data)))
    data.index.name = "ID"
    data.rename(columns={'Description': 'Name'}, inplace=True)
    temp = np.ones(len(data))
    data.insert(loc=1, column='GWEIGHT', value=temp)  # adding an extra column
    # These three lines add the EWEIGHT row and move it to the top.
    data.loc['EWEIGHT'] = list(np.ones(len(list(data))))
    newIndex = ['EWEIGHT'] + [ind for ind in data.index if ind != 'EWEIGHT']
    data = data.reindex(index=newIndex)
    if atr_companion:
        # AID row: blank cells under 'Name'/'GWEIGHT', then ARRY<i>X ids that
        # must match the node names written to the .atr file.
        new_AID = ['', '']
        for element in range(len(order_of_columns)):
            temp = 'ARRY' + str(element) + 'X'
            new_AID.append(temp)
        data.loc['AID'] = new_AID
        newIndex = ['AID'] + [ind for ind in data.index if ind != 'AID']
        data = data.reindex(index=newIndex)
        # Reorder the sample columns to match the dendrogram.
        data = data[['Name', 'GWEIGHT'] + order_of_columns]
    if gtr_companion:
        new_GID = ['']
        if atr_companion:
            new_GID = ['AID', 'EWEIGHT']  # This is to make sure we fit the CDT format
        # GENE<i>X ids must match the node names written to the .gtr file.
        for element in range(len(order_of_rows)):
            temp = 'GENE' + str(element) + 'X'
            new_GID.append(temp)
        data.insert(loc=0, column='GID', value=new_GID)  # adding an extra column
        data.insert(loc=0, column=data.index.name, value=data.index)  # Making the index a column
        # reorder rows to match the dendrogram
        temp = ['AID', 'EWEIGHT'] + order_of_rows
        data = data.reindex(temp)
        # Making the 'GID' the index -- for printing purposes
        data.index = data['GID']
        data.index.name = 'GID'
        data.drop(['GID'], axis=1, inplace=True)
    # The first three lines need to be written separately due to a quirk in the CDT file format:
    f = open(name, 'w')
    f.write(data.to_csv(sep='\t', index=True, header=True))
    f.close()
    # Rounded copy (local only; the file above keeps full precision).
    data = data.round(2)
    return
def make_atr(col_tree_dic, data, dist, clustering_method='average', file_name='test.atr'):
    """Write the ATR (array/column tree) companion file for a CDT heatmap.

    Each output line describes one internal node of the column dendrogram:
    node label, its two child labels, and the similarity between the two
    children, tab-separated, using the ARRYnX / NODEnX naming convention
    expected by HierarchicalClusteringViewer.

    :param col_tree_dic: dict mapping internal node id -> (child_a, child_b)
    :param data: 2D array of expression values (rows are genes, cols samples)
    :param dist: pairwise distance/similarity function between two vectors
    :param clustering_method: how to aggregate child-to-child distances
                              ('average' or 'complete')
    :param file_name: path of the .atr file to create
    """
    max_val = len(col_tree_dic)
    # Similarity between the two children of every internal node.
    distance_dic = {}
    for node, children in col_tree_dic.items():
        distance_dic[node] = centroid_distances(children[0], children[1], tree=col_tree_dic,
                                                data=data, axis=1, distance=dist,
                                                clustering_method=clustering_method)
    # FIX: use a context manager so the handle is closed even if a write fails
    # (the original left the file open on any exception).
    with open(file_name, 'w') as f:
        for node, children in col_tree_dic.items():
            elements = [translate_tree(node, max_val, 'atr'),
                        translate_tree(children[0], max_val, 'atr'),
                        translate_tree(children[1], max_val, 'atr'),
                        "{num:.{width}f}".format(num=distance_dic[node], width=SIGNIFICANT_DIGITS)]
            f.write('\t'.join(elements) + '\n')
    return
def make_gtr(row_tree_dic, data, dist, clustering_method='average', file_name='test.gtr'):
    """Write the GTR (gene/row tree) companion file for a CDT heatmap.

    Mirror image of make_atr: one tab-separated line per internal node of the
    row dendrogram (node, child_a, child_b, child-to-child similarity) using
    the GENEnX / NODEnX naming convention.

    :param row_tree_dic: dict mapping internal node id -> (child_a, child_b)
    :param data: 2D array of expression values (rows are genes)
    :param dist: pairwise distance/similarity function between two vectors
    :param clustering_method: how to aggregate child-to-child distances
    :param file_name: path of the .gtr file to create
    """
    max_val = len(row_tree_dic)
    # Similarity between the two children of every internal node.
    distance_dic = {}
    for node, children in row_tree_dic.items():
        distance_dic[node] = centroid_distances(children[0], children[1], tree=row_tree_dic,
                                                data=data, axis=0, distance=dist,
                                                clustering_method=clustering_method)
    # FIX: context manager guarantees the file is closed (was a leak on error).
    with open(file_name, 'w') as f:
        for node, children in row_tree_dic.items():
            elements = [translate_tree(node, max_val, 'gtr'),
                        translate_tree(children[0], max_val, 'gtr'),
                        translate_tree(children[1], max_val, 'gtr'),
                        "{num:.{width}f}".format(num=distance_dic[node], width=SIGNIFICANT_DIGITS)]
            f.write('\t'.join(elements) + '\n')
    return
def translate_tree(what, length, g_or_a):
    """Convert a numeric dendrogram index into a CDT-style label.

    Indices up to *length* are leaves and become 'ARRYnX' (columns, when
    g_or_a contains 'a') or 'GENEnX' (rows, when it contains 'g'); larger
    indices are internal nodes and become 'NODEnX', re-based past the leaf
    count.  An unrecognized flag prints a warning and yields [].
    """
    is_leaf = what <= length
    if 'a' in g_or_a:
        return 'ARRY{}X'.format(what) if is_leaf else 'NODE{}X'.format(what - length)
    if 'g' in g_or_a:
        return 'GENE{}X'.format(what) if is_leaf else 'NODE{}X'.format(what - length)
    print('This function does not support g_or_a=', g_or_a)
    return []
# def get_children_recursively(k, model, node_dict, leaf_count, n_samples, data, verbose=False, left=None, right=None):
# # print(k)
# i, j = model.children_[k]
#
# if k in node_dict:
# return node_dict[k]['children']
#
# if i < leaf_count:
# # print("i if")
# left = [i]
# else:
# # print("i else")
# # read the AgglomerativeClustering doc. to see why I select i-n_samples
# left, node_dict = get_children_recursively(i - n_samples, model, node_dict,
# leaf_count, n_samples, data, verbose, left, right)
#
# if j < leaf_count:
# # print("j if")
# right = [j]
# else:
# # print("j else")
# right, node_dict = get_children_recursively(j - n_samples, model, node_dict,
# leaf_count, n_samples, data, verbose, left, right)
#
# if verbose:
# print(k, i, j, left, right)
# temp = map(lambda ii: data[ii], left)
# left_pos = np.mean(list(temp), axis=0)
# temp = map(lambda ii: data[ii], right)
# right_pos = np.mean(list(temp), axis=0)
#
# # this assumes that agg_cluster used euclidean distances
# dist = metrics.pairwise_distances([left_pos, right_pos], metric='euclidean')[0, 1]
#
# all_children = [x for y in [left, right] for x in y]
# pos = np.mean(list(map(lambda ii: data[ii], all_children)), axis=0)
#
# # store the results to speed up any additional or recursive evaluations
# node_dict[k] = {'top_child': [i, j], 'children': all_children, 'pos': pos, 'dist': dist,
# 'node_i': k + n_samples}
# return all_children, node_dict
# def recursive_atr
def get_children(tree, leaves_are_self_children=False):
    """Expand a merge tree into a node -> children mapping.

    Leaves (ids <= len(tree)) map to [] (or [themselves] when
    leaves_are_self_children is set); internal nodes map to their expanded
    child list via list_children_single_node.

    NOTE(review): range(max(tree.keys())) excludes the top node id itself —
    confirm with callers whether the root is meant to be omitted.
    """
    expanded_tree = {}
    for node in range(max(tree.keys())):
        if node <= len(tree):
            expanded_tree[node] = [node] if leaves_are_self_children else []
        else:
            expanded_tree[node] = list_children_single_node(node, tree, leaves_are_self_children)
    return expanded_tree
def list_children_single_node(node, tree, leaves_are_self_children=False, only_leaves_are_children=True):
    """Collect the children of *node* in a merge tree.

    Node ids <= len(tree) are leaves; internal nodes are looked up in *tree*
    and any internal child is expanded recursively so that its leaf
    descendants are included.  With only_leaves_are_children (the default)
    only leaf ids are returned; otherwise direct internal children appear too.
    The result is sorted and de-duplicated.
    """
    if node <= len(tree):
        # A leaf has no children, unless it should count as its own child.
        collected = [node] if leaves_are_self_children else []
    else:
        collected = list(tree[node])
        # Expand every internal child into its leaf descendants.
        for child in tree[node]:
            if child > len(tree):
                collected += list_children_single_node(child, tree, only_leaves_are_children=True)
    if only_leaves_are_children:
        return [i for i in sorted(np.unique(collected)) if i <= len(tree)]
    return sorted(np.unique(collected))
def centroid_distances(node_a, node_b, tree, data, axis=0, distance=mydist, clustering_method='average'):
    """Aggregate pairwise distances between the leaves under two tree nodes.

    axis=0 compares rows of *data*, axis=1 compares columns (data is
    transposed first).  'average' linkage returns the mean over all
    leaf-to-leaf pairs; 'complete' returns the minimum (the *distance*
    callables used by this module are similarities, where complete linkage
    corresponds to the smallest similarity).
    """
    if axis == 1:
        data = np.transpose(data)
    elif axis != 0:
        exit("Variable 'data' does not have that many axises (╯°□°)╯︵ ┻━┻")
    leaves_a = list_children_single_node(node_a, tree=tree, leaves_are_self_children=True)
    leaves_b = list_children_single_node(node_b, tree=tree, leaves_are_self_children=True)
    if clustering_method == 'average':
        pair_dists = [distance(u, v) for u, v in itertools.product(data[leaves_a], data[leaves_b])]
        return np.average(pair_dists)
    elif clustering_method == 'complete':
        pair_dists = [distance(u, v) for u, v in itertools.product(data[leaves_a], data[leaves_b])]
        return np.min(pair_dists)
    else:
        exit("Ony 'average' and 'complete' clustering methods are accepted at the moment (>_<)")
def euclidian_similarity(x, y):
    """Map the distance between *x* and *y* into (0, 1] as exp(-distance)."""
    separation = mydist(x, y)
    return 1 / (np.exp(separation))
def better_dendodist(children, distance, tree, data, axis, clustering_method='average'):
    """Return the linkage value for every merge pair in *children*.

    Each element of *children* is a (node_a, node_b) pair; the result list
    keeps the same order as the input.
    """
    return [centroid_distances(pair[0], pair[1], tree, data, axis,
                               distance=distance, clustering_method=clustering_method)
            for pair in children]
def HierarchicalClustering(pwd: "The current directory",
                           gct_name: "Gene expression data filename (.gct file) or Pandas DataFrame "
                                     "where rows are genes and columns are samples",
                           col_distance_metric: "The function to be used when comparing the distance/similarity of "
                                                "the columns in the gct_name dataset",
                           row_distance_metric: "The function to be used when comparing the distance/similarity of "
                                                "the rows in the gct_name dataset",
                           clustering_method: "Type of linkage to use" = 'average',
                           output_base_name: "Base name for output file" = 'HC_output',
                           row_normalization: "Whether to normalize each row (gene) in the data" = False,
                           col_normalization: "Whether to normalize each column (sample) in the data" = False,
                           row_centering: "How to center each row (gene) in the data" = 'Mean',
                           col_centering: "How to center each column (sample) in the data" = 'Mean',
                           output_distances: "Whether or not output the pair-wise distance matrix. "
                                             "If true, the distance between each column will be called, "
                                             "which can be very computationally intensive. "
                                             "If unsure, leave as False." = False,
                           custom_plot: "Plot the dendrograms by Genes, Samples, or Both" = 'Both',
                           clusters_to_highlight: "How many clusters to highlight in the dendrogram" = 2,
                           show: "Whether to show the plot at the end" = False):
    """Cluster a GCT dataset by rows and/or columns and write viewer files.

    Fits sklearn AgglomerativeClustering on columns and/or rows, writes the
    .atr/.gtr/.cdt companion files for HierarchicalClusteringViewer, and
    saves a dendrogram+heatmap PDF for the requested custom_plot mode.
    Returns (col_model, row_model); either may be None when that axis was
    not clustered.  Relies on module-level helpers and lookup tables
    (parse_data, make_tree, order_leaves, str2func, str2similarity,
    str2affinity_func, linkage_dic, plot_dendrogram, list2cls, list2intlist).
    """
    # gct_name, col_distance_metric, output_distances, row_distance_metric, clustering_method, output_base_name, \
    # row_normalization, col_normalization, row_centering, col_centering = parse_inputs(sys.argv)
    # If one axis is not clustered, only the other axis can be plotted.
    if col_distance_metric == "No_column_clustering":
        custom_plot = 'Genes'
    if row_distance_metric == "No_row_clustering":
        custom_plot = 'Samples'
    og_data, og_data_df, data, data_df, col_labels, row_labels, og_full_gct, new_full_gct = \
        parse_data(gct_name, row_normalization, col_normalization, row_centering, col_centering)
    # Default orders (used when an axis is not clustered).
    order_of_columns = list(data_df)
    order_of_rows = list(data_df.index)
    data_transpose = np.transpose(data)
    # print(data)
    # print(data_df)
    atr_companion = False
    col_model = None
    col_tree = None
    gtr_companion = False
    row_model = None
    row_tree = None
    AID = None
    GID = None
    # --- Column (sample) clustering -> .atr file ---
    if col_distance_metric != 'No_column_clustering':
        atr_companion = True
        col_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,
                                            affinity=str2func[col_distance_metric])
        col_model.fit(data_transpose)
        col_tree = make_tree(col_model)
        order_of_columns = order_leaves(col_model, tree=col_tree, data=data_transpose,
                                        dist=str2similarity[col_distance_metric], labels=col_labels, reverse=True)
        path_to_atr = output_base_name + '.atr'
        make_atr(col_tree, file_name=path_to_atr, data=data,
                 dist=str2similarity[col_distance_metric], clustering_method=linkage_dic[clustering_method])
    # --- Row (gene) clustering -> .gtr file ---
    if row_distance_metric != 'No_row_clustering':
        gtr_companion = True
        row_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,
                                            affinity=str2func[row_distance_metric])
        # y_col = row_model.fit_predict(np.transpose(data))
        # print(y_col)
        row_model.fit(data)
        row_tree = make_tree(row_model)
        order_of_rows = order_leaves(row_model, tree=row_tree, data=data,
                                     dist=str2similarity[row_distance_metric], labels=row_labels)
        path_to_gtr = output_base_name + '.gtr'
        make_gtr(row_tree, data=data, file_name=output_base_name + '.gtr', dist=str2similarity[row_distance_metric])
    # Optional CSV dump of the pairwise row-distance matrix.
    if output_distances:
        # TODO: check which col or row was selected, or both
        row_distance_matrix = str2affinity_func[row_distance_metric](data)
        # col_distance_matrix = str2affinity_func[col_distance_metric](np.transpose(data))
        # NOTE(review): dist_file is never closed here — confirm whether a
        # context manager should be introduced.
        dist_file = open(output_base_name + '_pairwise_distances.csv', 'w')
        dist_file.write('labels,')
        dist_file.write(",".join(col_model.labels_.astype(str)) + "\n")
        dist_file.write('samples,')
        dist_file.write(",".join(list(data_df)) + "\n")
        i = 0
        for row in row_distance_matrix:
            dist_file.write('distances row=' + str(i) + "," + ",".join(row.astype(str)) + "\n")
            i += 1
    # The CDT holds the (reordered) data matrix itself.
    path_to_cdt = output_base_name + '.cdt'
    make_cdt(data=new_full_gct, name=path_to_cdt, atr_companion=atr_companion,
             gtr_companion=gtr_companion,
             order_of_columns=order_of_columns, order_of_rows=order_of_rows)
    if custom_plot == 'Samples':
        # Plotting the heatmap with dendrogram (column dendrogram on top).
        plt.clf()
        # fig = plt.figure(figsize=(16, 9), dpi=300)
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(2, 1, height_ratios=[1, 5])
        gs.update(wspace=0.0, hspace=0.0)
        ax0 = plt.subplot(gs[0])  # Doing dendrogram first
        ax0.axis('off')
        col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,
                                          dist=str2similarity[col_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          title='no_title.png', orientation='top')
        col_order = [int(i) for i in col_order]
        # print(col_order)
        named_col_order = [col_labels[i] for i in col_order]
        # print(named_col_order)
        # print(col_order)
        # print(col_model.labels_)
        ax1 = plt.subplot(gs[1])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df[named_col_order], ax=ax1, cbar=False, cmap='bwr')
        # ax1.xaxis.tick_top()
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The ATR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_atr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            # plt.show()
            pass
        # col_order = [int(i) for i in col_order]
        # print(col_order)
        # named_col_order = [col_labels[i] for i in col_order]
        # print(named_col_order)
        # print(col_order)
        # print(idxs2clusters)
        # Emit the sample cluster assignments as a .cls file.
        cls_list = col_model.labels_
        # for i in range(len(col_order)):
        #     cls_list.append(idxs2clusters[i])
        # print(cls_list)
        # order_by = [col_order.index(i) for i in range(len(col_order))]
        # list2intlist(cls_list, custom_order=order_by)
        # in_list = np.array(cls_list)
        # print(cls_list)
        # print(np.array(list2intlist(cls_list, custom_order=order_by)))
        list2cls(np.array(list2intlist(cls_list)), name_of_out=output_base_name+'.cls', sep=' ')
    if custom_plot == 'Genes':
        # Plotting the heatmap with dendrogram (row dendrogram on the right).
        plt.clf()
        # fig = plt.figure(figsize=(16, 9), dpi=300)
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])
        gs.update(wspace=0.0, hspace=0.0)
        ax0 = plt.subplot(gs[1])  # Doing dendrogram first
        ax0.axis('off')
        row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,
                                          dist=str2similarity[row_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          orientation='right', title='no_title.png')
        # row_order = [int(i) for i in row_order]
        # named_row_order = [row_labels[i] for i in row_order]
        ax1 = plt.subplot(gs[0])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df.iloc[row_order], ax=ax1, cbar=False, cmap='bwr')
        # ax1.xaxis.tick_top()
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The GTR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_gtr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            plt.show()
    if custom_plot == 'Both':
        # Plotting the heatmap with both dendrograms (top and right).
        plt.clf()
        # fig = plt.figure(figsize=(16, 9), dpi=300)
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(2, 2, width_ratios=[5, 1], height_ratios=[1, 5])
        gs.update(wspace=0.0, hspace=0.0)
        # Doing TOP dendrogram first
        ax0 = plt.subplot(gs[0])
        ax0.axis('off')
        col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,
                                          dist=str2similarity[col_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          title='no_title.png', orientation='top')
        col_order = [int(i) for i in col_order]
        named_col_order = [col_labels[i] for i in col_order]
        # Doing RIGHT dendrogram
        ax3 = plt.subplot(gs[3])
        ax3.axis('off')
        row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,
                                          dist=str2similarity[row_distance_metric],
                                          clustering_method=clustering_method,
                                          color_threshold=clusters_to_highlight,
                                          orientation='right', title='no_title.png')
        # Plotting the heatmap now
        ax1 = plt.subplot(gs[2])
        # Row-normalizing for display purposes only:
        data_df = data_df.subtract(data_df.min(axis=1), axis=0)
        data_df = data_df.div(data_df.max(axis=1), axis=0)
        sns.heatmap(data_df[named_col_order].iloc[row_order], ax=ax1, cbar=False, cmap='bwr')
        # ax1.xaxis.tick_top()
        [label.set_rotation(90) for label in ax1.get_xticklabels()]
        file_path_plot = output_base_name + '.pdf'
        plt.savefig(file_path_plot, bbox_inches='tight')
        print("----------------------------------------------------------------------")
        print("The PDF of this heatmap can be downloaded here:")
        display(HTML('<a href="' + file_path_plot + '" target="_blank">PDF of the heatmap</a>'))
        print("----------------------------------------------------------------------")
        print("The CDF which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_cdt + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        print("The GTR which is compatible with HierarchicalClusteringViewer is here:")
        display(HTML('<a href="' + path_to_gtr + '" target="_blank">TXT containing the output data</a>'))
        print("----------------------------------------------------------------------")
        if show:
            plt.show()
    return col_model, row_model
def hc_samples(
        input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples",
        clustering_type: "single or consensus -- Only single is suported at the moment",
        distance_metric: "the function to be used when comparing the distance/similarity of the columns in the "
                         "input_gene_expression dataset",
        file_basename: "the name to use when naming output files" = 'HC_out',
        clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None):
    """Cluster the columns (samples) of a GCT file and plot the result.

    Thin convenience wrapper around HierarchicalClustering: row clustering
    is disabled, samples are clustered with average linkage and the given
    distance metric, and the fitted column model is returned.
    """
    print("Currenty clustering_type is being ignored, only 'single' is supported.")
    print("Now we will start performing hierarchical clustering, this may take a little while.")
    col_model, row_model = HierarchicalClustering(
        pwd='.',
        gct_name=input_gene_expression,
        col_distance_metric=distance_metric,
        row_distance_metric='No_row_clustering',
        clustering_method='average',
        output_base_name=file_basename,
        row_normalization=False,
        col_normalization=False,
        row_centering='Mean',
        col_centering='Mean',
        output_distances=False,
        custom_plot='Samples',
        clusters_to_highlight=clusters_to_highlight,
        show=True)
    print("Done with Hierarchical Clustering!")
    return col_model
def hc_genes(
        input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples",
        clustering_type: "single or consensus -- Only single is suported at the moment",
        distance_metric: "the function to be used when comparing the distance/similarity of the rows in the "
                         "input_gene_expression dataset",
        file_basename: "the name to use when naming output files" = 'HC_out',
        clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None):
    """Cluster the rows (genes) of a GCT file and plot the result.

    Thin convenience wrapper around HierarchicalClustering: column clustering
    is disabled, genes are clustered with average linkage and the given
    distance metric, and the fitted row model is returned.
    """
    print("Currenty clustering_type is being ignored, only 'single' is supported.")
    print("Now we will start performing hierarchical clustering, this may take a little while.")
    col_model, row_model = HierarchicalClustering(
        pwd='.',
        gct_name=input_gene_expression,
        col_distance_metric='No_column_clustering',
        row_distance_metric=distance_metric,
        clustering_method='average',
        output_base_name=file_basename,
        row_normalization=False,
        col_normalization=False,
        row_centering='Mean',
        col_centering='Mean',
        output_distances=False,
        custom_plot='Genes',
        clusters_to_highlight=clusters_to_highlight,
        show=True)
    print("Done with Hierarchical Clustering!")
    return row_model
def normalize_dataframe(df, log_normalize=None,
                        row_centering='Mean', row_normalization=True,
                        col_centering='Mean', col_normalization=True):
    """Center and/or scale a DataFrame along rows and then columns.

    :param df: input DataFrame (e.g. genes x samples).
    :param log_normalize: unsupported; any non-None value only prints a warning.
    :param row_centering: 'Mean', 'Median', or 'No'.
    :param row_normalization: divide each row by its Euclidean norm.
    :param col_centering: 'Mean', 'Median', or 'No'.
    :param col_normalization: divide each column by its Euclidean norm.
    :return: a new DataFrame with the same index/columns, or *df* itself when
        nothing at all was requested.
    """
    if (log_normalize is None) \
            and (row_centering == 'No') and (col_centering == 'No') \
            and (row_normalization is False) and (col_normalization is False):
        print("No normalization has been requested ಠ_ಠ¯")
        return df
    # FIX: DataFrame.as_matrix() was removed in pandas 1.0;
    # to_numpy() is the supported replacement.
    data = df.to_numpy()
    # Log Normalizing
    if log_normalize is not None:
        print("I'm sorry, log-normalization is not supported at the moment (u_u)")
    if row_centering != 'No':
        if row_centering == 'Mean':
            # keepdims=True keeps the (n, 1) column-vector shape for
            # broadcasting, replacing the manual reshape of the original.
            data = data - np.mean(data, axis=1, keepdims=True)
        elif row_centering == 'Median':
            data = data - np.median(data, axis=1, keepdims=True)
        else:
            print("row_centering has an unexpected value:", row_centering)
    if row_normalization:
        row_norm = np.sum(data * data, axis=1, keepdims=True)
        data = data / np.sqrt(row_norm)
    if col_centering != 'No':
        if col_centering == 'Mean':
            data = data - np.mean(data, axis=0)
        elif col_centering == 'Median':
            data = data - np.median(data, axis=0)
        else:
            print("col_centering has an unexpected value: ", col_centering)
    if col_normalization:
        col_norm = np.sum(data * data, axis=0)
        data = data / np.sqrt(col_norm)
    normalized_df = pd.DataFrame(data=data, index=df.index, columns=list(df))
    return normalized_df
def display_heatmap(data,
                    name='heatmap',
                    log_normalize=None,
                    row_centering: "How to center each row (gene) in the data" = 'No',
                    row_normalization: "Whether to normalize each row (gene) in the data" = True,
                    col_centering: "How to center each column (sample) in the data" = 'No',
                    col_normalization: "Whether to normalize each column (sample) in the data" = False,
                    mostrar=False):
    """Normalize *data* and save it as a PDF heatmap sized to fit row labels.

    :param data: a DataFrame, a local GCT file path, or a URL to a GCT file.
    :param name: output filename; '.pdf' is appended if missing.
    :param log_normalize: forwarded to normalize_dataframe (unsupported there).
    :param mostrar: if True, also show the plot interactively.
    """
    if isinstance(data, pd.DataFrame):
        data_to_plot = data.copy()
    elif os.path.isfile(data):
        data_to_plot = pd.read_table(data, skiprows=2, sep='\t')
        data_to_plot.set_index('Name', inplace=True)
        data_to_plot.drop('Description', axis=1, inplace=True)
    else:
        # Last resort: assume it is a URL to a GCT file.
        try:
            data_to_plot = pd.read_table(data, skiprows=2, sep='\t')
        except urllib.error.HTTPError:
            print("I don't know what the variable 'data' contains.")
            print('data=')
            print(data)
            exit("If this is a url it may not be accessible.\n"
                 "(╯°□°)╯︵ ┻━┻")
        data_to_plot.set_index('Name', inplace=True)
        data_to_plot.drop('Description', axis=1, inplace=True)
    data_to_plot = normalize_dataframe(data_to_plot, log_normalize=log_normalize,
                                       row_centering=row_centering, row_normalization=row_normalization,
                                       col_centering=col_centering, col_normalization=col_normalization)
    plt.clf()
    # # figure reshape from:
    # # https://stackoverflow.com/questions/35127920/overlapping-yticklabels-is-it-possible-to-control-cell-size-of-heatmap-in-seabo
    # # and from:
    # # https://matplotlib.org/users/customizing.html
    # get the tick label font size
    fontsize_pt = plt.rcParams['ytick.labelsize']
    dpi = 72.27
    # compute the matrix height in points and inches
    # FIX: as_matrix() was removed in pandas 1.0; the row count is shape[0].
    matrix_height_pt = fontsize_pt * data_to_plot.shape[0]
    matrix_height_in = (matrix_height_pt / dpi) * 1.2
    # compute the required figure height
    top_margin = 0.01  # in percentage of the figure height
    bottom_margin = 0.01  # in percentage of the figure height
    figure_height = matrix_height_in / (1 - top_margin - bottom_margin)
    # build the figure instance with the desired height
    fig, ax = plt.subplots(
        figsize=(6, figure_height),
        gridspec_kw=dict(top=1 - top_margin, bottom=bottom_margin))
    sns.heatmap(data_to_plot, cmap='bwr', yticklabels=True, square=True,
                cbar_kws={'use_gridspec': False,
                          'location': "right",
                          'shrink': 0.5,
                          'label': ''}
                )
    if not name.endswith('.pdf'):
        name = name + '.pdf'
    plt.savefig(name, dpi=dpi, bbox_inches='tight')
    print(name, "has been created!")
    if mostrar:
        plt.show()
    print("The PDF of this heatmap can be downloaded here:")
    display(HTML('<a href="' + name + '" target="_blank">PDF of the heatmap</a>'))
    return
| true | true |
f726e468fffed12d4ce9bb88c0a2c8505212f61d | 6,650 | py | Python | visualizer/visualizer/network.py | NikKaem/mapf-project | d99727d5f62380cf2a7d37dec70b5cdc71db3fb6 | [
"MIT"
] | null | null | null | visualizer/visualizer/network.py | NikKaem/mapf-project | d99727d5f62380cf2a7d37dec70b5cdc71db3fb6 | [
"MIT"
] | null | null | null | visualizer/visualizer/network.py | NikKaem/mapf-project | d99727d5f62380cf2a7d37dec70b5cdc71db3fb6 | [
"MIT"
] | null | null | null | from threading import Thread
import socket
import select
import time
import os
import clingo
import argparse
from PyQt5.QtCore import *
class VisualizerSocket(object):
    """Base class for a polling TCP client used by the visualizer.

    Owns the socket, an optional QTimer that polls for incoming data once
    per second, and an optional worker thread that runs an external script.
    Subclasses override receive() and run().
    """

    def __init__(self, default_host = '127.0.0.1', default_port = 5000, socket_name = 'socket'):
        self._host = default_host          # peer address; may be overridden by connect()
        self._port = default_port          # peer port; may be overridden by connect()
        self._s = None                     # the socket, or None while disconnected
        self._timer = None                 # QTimer driving periodic receive() calls
        self._socket_name = socket_name    # human-readable name for log messages
        self._thread = None                # thread running an external script, if any
        self._parser = None                # consumer of received atoms (set via set_parser)
        self._waiting = False              # True while an answer from the peer is pending

    def __del__(self):
        self.close()

    def set_parser(self, parser):
        """Set the object that will consume received atoms."""
        self._parser = parser

    def run_script(self, command, port = None):
        """Run *command* in a background thread, optionally connecting to it."""
        self.close()
        self._thread = Thread(target = lambda: os.system(command))
        self._thread.start()
        if port is not None:
            self.connect('127.0.0.1', port)

    def join(self, wait_time):
        """Join the script thread (up to *wait_time* seconds), then forget it."""
        if self._thread is not None:
            self._thread.join(wait_time)
            self._thread = None

    def run_connection(self):
        """Start (or restart) the 1 Hz polling timer that calls receive()."""
        if self._s is None:
            return
        if self._timer is not None:
            self._timer.stop()
        self._timer = QTimer()
        self._timer.timeout.connect(self.receive)
        self._timer.start(1000)

    def connect(self, host = None, port = None):
        """Connect to host:port, retrying up to 5 extra times (2 s apart).

        Returns 0 on success, -1 when every attempt failed.
        """
        if self.is_connected() and host == self._host and port == self._port:
            return 0
        if host is not None:
            self._host = host
        if port is not None:
            self._port = port
        self.close()
        print('Try connection with '+ self._socket_name)
        self._s = socket.socket()
        connected = False
        tryCount = 0
        # NOTE(review): the same socket object is reused after a failed
        # connect(); on some platforms a fresh socket per attempt is needed
        # -- confirm on the target platform.
        while not connected: #try to connect to the server
            try:
                self._s.connect((self._host, self._port))
                connected = True
            except(socket.error):
                if tryCount >= 5:
                    print('Failed to connect with ' + self._socket_name)
                    self.close()
                    return -1
                print('Failed to connect with ' + self._socket_name + ' \nRetrying in 2 sek')
                time.sleep(2)
                tryCount += 1
        print('Connect with '+ self._socket_name)
        return 0

    def send(self, msg):
        """Send *msg* (UTF-8 encoded); no-op when disconnected or empty."""
        if self._s is None or msg is None:
            return
        if msg == '':
            return
        self._s.send(msg.encode('utf-8'))
        pass

    def done_step(self, step):
        """Tell the peer that *step* finished and mark ourselves as waiting."""
        if self._s is None:
            return
        self._waiting = True
        self._s.send(('%$done(' + str(step) + ').\n').encode('utf-8'))

    def model_expanded(self, msg):
        # Hook for subclasses; the base class ignores model changes.
        pass

    def _receive_data(self):
        """Read one newline-terminated chunk; '' when nothing is ready.

        Returns None (after closing the socket) when the peer has shut the
        connection down (recv() returned '' on a readable socket).
        """
        breakLoop = False
        data = ''
        try:
            ready = select.select([self._s], [], [], 0.1)
            # NOTE(review): 'ready' is evaluated once before the loop; the
            # loop relies on recv() keeping up once data became readable.
            while (not breakLoop) and ready[0]:
                new_data = self._s.recv(2048).decode()
                if not new_data.find('\n') == -1 or new_data == '':
                    breakLoop = True
                data += new_data
                if ready[0] and new_data == '':
                    self.close()
                    return None
        except socket.error as err:
            print(err)
        return data

    def receive(self):
        # Overridden by subclasses to parse incoming data.
        return

    def run(self):
        # Overridden by subclasses to start their main activity.
        return

    def close(self):
        """Stop polling, shut the socket down, and join the script thread."""
        if self._timer is not None:
            self._timer.stop()
        if self._s is not None:
            print('Close connection to ' + self._socket_name)
            try:
                self._s.shutdown(socket.SHUT_RDWR)
            except socket.error:
                pass
            self._s.close()
            self._s = None
        self.join(10)

    def is_connected(self):
        """Return True while a socket is open."""
        return self._s is not None

    def script_is_running(self):
        """Return True while an external script thread exists."""
        return self._thread is not None

    def is_waiting(self):
        """Return True while an answer from the peer is pending."""
        return self._waiting

    def get_host(self):
        return self._host

    def get_port(self):
        return self._port
class SolverSocket(VisualizerSocket):
    """Socket client that streams the visualizer model to an ASP solver on
    port 5000 and feeds the returned atoms into the parser."""

    def __init__(self):
        # FIX: super(self.__class__, self) recurses forever if this class is
        # ever subclassed; the zero-argument form binds to SolverSocket.
        super().__init__('127.0.0.1', 5000, 'solver')
        self._model = None  # the model whose state is sent to the solver

    def set_model(self, model):
        """Attach the model and register this socket with it."""
        self._model = model
        if model is not None:
            self._model.add_socket(self)

    def model_expanded(self, msg):
        """Forward a model change to the solver and wait for its answer."""
        self.send(msg)
        self._waiting = True

    def receive(self):
        """Poll the solver; parse '.'-separated atoms into the parser.

        '%$RESET' clears the parser's model actions; everything else is
        parsed as a clingo term.  Returns -1 when prerequisites are missing.
        """
        if self._s is None or self._parser is None or self._model is None:
            return -1
        data = self._receive_data()
        if data is None:
            return
        if data == '':
            return
        self._waiting = False
        for str_atom in data.split('.'):
            if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
                if str_atom == '%$RESET':
                    self._parser.clear_model_actions(True)
                else:
                    self._parser.on_atom(clingo.parse_term(str_atom))
        self._model.update_windows()

    def solve(self):
        """Send the current instance to the solver and start polling."""
        # FIX (idiom): compare against None with 'is', not '==' (PEP 8).
        if self._s is None or self._model is None:
            return -1
        self._s.send('%$RESET.'.encode('utf-8'))
        self._model.set_editable(False)
        self._model.restart()
        for atom in self._model.to_init_str():  # send instance
            atom = atom.replace('\n', '')
            self._s.send(str(atom).encode('utf-8'))
        self._s.send('\n'.encode('utf-8'))
        self.run_connection()

    def run(self):
        self.solve()
class SimulatorSocket(VisualizerSocket):
    """Socket client that receives problem instances from a simulator on
    port 5001 and forwards them to the parser."""

    def __init__(self):
        # FIX: super(self.__class__, self) recurses forever if this class is
        # ever subclassed; the zero-argument form binds to SimulatorSocket.
        super().__init__('127.0.0.1', 5001, 'simulator')

    def receive(self):
        """Poll the simulator; parse '.'-separated atoms into the parser.

        '%$RESET' clears the parser's model; when anything at all was parsed
        the parser is told the instance is done (with the reset flag).
        Returns -1 when socket or parser is missing.
        """
        if self._s is None or self._parser is None:
            return -1
        data = self._receive_data()
        empty = True
        reset = False
        if data is None:
            return
        if data == '':
            return
        self._waiting = False
        for str_atom in data.split('.'):
            # Skip the empty fragments produced by split() and lone newlines.
            if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
                if str_atom == '%$RESET':
                    self._parser.clear_model()
                    reset = True
                    empty = False
                else:
                    self._parser.on_atom(clingo.parse_term(str_atom))
                    empty = False
        if not empty:
            self._parser.done_instance(reset)

    def connect(self, host = None, port = None):
        """Connect like the base class, then immediately start polling."""
        VisualizerSocket.connect(self, host, port)
        self.run()

    def run(self):
        self.run_connection()
| 29.424779 | 96 | 0.530226 | from threading import Thread
import socket
import select
import time
import os
import clingo
import argparse
from PyQt5.QtCore import *
class VisualizerSocket(object):
def __init__(self, default_host = '127.0.0.1', default_port = 5000, socket_name = 'socket'):
self._host = default_host
self._port = default_port
self._s = None
self._timer = None
self._socket_name = socket_name
self._thread = None
self._parser = None
self._waiting = False
def __del__(self):
self.close()
def set_parser(self, parser):
self._parser = parser
def run_script(self, command, port = None):
self.close()
self._thread = Thread(target = lambda: os.system(command))
self._thread.start()
if port is not None:
self.connect('127.0.0.1', port)
def join(self, wait_time):
if self._thread is not None:
self._thread.join(wait_time)
self._thread = None
def run_connection(self):
if self._s is None:
return
if self._timer is not None:
self._timer.stop()
self._timer = QTimer()
self._timer.timeout.connect(self.receive)
self._timer.start(1000)
def connect(self, host = None, port = None):
if self.is_connected() and host == self._host and port == self._port:
return 0
if host is not None:
self._host = host
if port is not None:
self._port = port
self.close()
print('Try connection with '+ self._socket_name)
self._s = socket.socket()
connected = False
tryCount = 0
while not connected:
try:
self._s.connect((self._host, self._port))
connected = True
except(socket.error):
if tryCount >= 5:
print('Failed to connect with ' + self._socket_name)
self.close()
return -1
print('Failed to connect with ' + self._socket_name + ' \nRetrying in 2 sek')
time.sleep(2)
tryCount += 1
print('Connect with '+ self._socket_name)
return 0
def send(self, msg):
if self._s is None or msg is None:
return
if msg == '':
return
self._s.send(msg.encode('utf-8'))
pass
def done_step(self, step):
if self._s is None:
return
self._waiting = True
self._s.send(('%$done(' + str(step) + ').\n').encode('utf-8'))
def model_expanded(self, msg):
pass
def _receive_data(self):
breakLoop = False
data = ''
try:
ready = select.select([self._s], [], [], 0.1)
while (not breakLoop) and ready[0]:
new_data = self._s.recv(2048).decode()
if not new_data.find('\n') == -1 or new_data == '':
breakLoop = True
data += new_data
if ready[0] and new_data == '':
self.close()
return None
except socket.error as err:
print(err)
return data
def receive(self):
return
def run(self):
return
def close(self):
if self._timer is not None:
self._timer.stop()
if self._s is not None:
print('Close connection to ' + self._socket_name)
try:
self._s.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self._s.close()
self._s = None
self.join(10)
def is_connected(self):
return self._s is not None
def script_is_running(self):
return self._thread is not None
def is_waiting(self):
return self._waiting
def get_host(self):
return self._host
def get_port(self):
return self._port
class SolverSocket(VisualizerSocket):
def __init__(self):
super(self.__class__, self).__init__('127.0.0.1', 5000, 'solver')
self._model = None
def set_model(self, model):
self._model = model
if model is not None:
self._model.add_socket(self)
def model_expanded(self, msg):
self.send(msg)
self._waiting = True
def receive(self):
if self._s is None or self._parser is None or self._model is None:
return -1
data = self._receive_data()
if data is None:
return
if data == '':
return
self._waiting = False
for str_atom in data.split('.'):
if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
if str_atom == '%$RESET':
self._parser.clear_model_actions(True)
else:
self._parser.on_atom(clingo.parse_term(str_atom))
self._model.update_windows()
def solve(self):
if self._s == None or self._model == None: return -1
self._s.send('%$RESET.'.encode('utf-8'))
self._model.set_editable(False)
self._model.restart()
for atom in self._model.to_init_str():
atom = atom.replace('\n', '')
self._s.send(str(atom).encode('utf-8'))
self._s.send('\n'.encode('utf-8'))
self.run_connection()
def run(self):
self.solve()
class SimulatorSocket(VisualizerSocket):
def __init__(self):
super(self.__class__, self).__init__('127.0.0.1', 5001, 'simulator')
def receive(self):
if self._s is None or self._parser is None:
return -1
data = self._receive_data()
empty = True
reset = False
if data is None:
return
if data == '':
return
self._waiting = False
for str_atom in data.split('.'):
if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
if str_atom == '%$RESET':
self._parser.clear_model()
reset = True
empty = False
else:
self._parser.on_atom(clingo.parse_term(str_atom))
empty = False
if not empty:
self._parser.done_instance(reset)
def connect(self, host = None, port = None):
VisualizerSocket.connect(self, host, port)
self.run()
def run(self):
self.run_connection()
| true | true |
f726e4b41f15fdd676d9d580ff8e3144b72f2f13 | 4,712 | py | Python | taxumap-manuscript-notebooks/embeddings.py | jsevo/taxumap | 1a02518dca822a65847994910177c74607243dae | [
"MIT"
] | 5 | 2021-11-21T16:47:17.000Z | 2022-02-04T16:57:15.000Z | taxumap-manuscript-notebooks/embeddings.py | jsevo/taxumap | 1a02518dca822a65847994910177c74607243dae | [
"MIT"
] | 13 | 2021-03-31T19:08:10.000Z | 2022-02-15T19:57:18.000Z | taxumap-manuscript-notebooks/embeddings.py | jsevo/taxumap | 1a02518dca822a65847994910177c74607243dae | [
"MIT"
] | 3 | 2021-09-22T19:21:36.000Z | 2022-02-10T21:39:35.000Z | from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, KernelPCA
from umap import UMAP
from sklearn.preprocessing import MinMaxScaler
RUNEMBEDDINGS = False
if RUNEMBEDDINGS:
#simple PCA
pcaembedding = PCA(n_components=2).fit_transform(XASV.fillna(0))
#base embedding (kernel pca)
kernelpcaembedding = KernelPCA(n_components=2).fit_transform(XASV.fillna(0))
# non-phylo umap
embedding_non_phylo_unscaled = UMAP(n_neighbors=120,min_dist=0.2, metric="manhattan").fit_transform(XASV)
# embedding_non_phylo_scaled = UMAP(n_neighbors=120,min_dist=0.2, metric="manhattan").fit_transform(MinMaxScaler().fit_transform(XASV))
RUNTAXUMAPS = False
if RUNTAXUMAPS:
from taxumap.taxumap import taxumap
agg_levels = ["Phylum", "Family"]
withscaling = False # do not scale the columns of X
distanceperlevel = False # do not calculate a separate distance matrix at each phylogenetic level because we are using the manhattan distance
distancemetric = "manhattan"
printfigure=False
printwithdiversity=False #dont plot the average diversity in the background of the scatter plot
X_in = XASV
tax = taxonomy
withusercolors=taxonomy_meta[["HexColor"]]
# TAXUMAP, X_embedded, taxumap_Xscaled, taxumap_X = taxumap(agg_levels,
# withscaling,
# distanceperlevel,
# distancemetric,
# printfigure,
# printwithdiversity,
# X_in,
# tax,
# withusercolors,
# debug=True, #return tables
# save_embedding=False #save xy coordinates
# );
TAXUMAP_alllevels, X_embedded_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels = taxumap(["Phylum", "Class", "Order", "Family", "Genus"],
withscaling,
distanceperlevel,
distancemetric,
printfigure,
printwithdiversity,
X_in,
tax,
withusercolors,
debug=True, #return tables
save_embedding=False #save xy coordinates
);
# TAXUMAPSCALED, X_embedded_scaled, taxumap_Xscaled_scaled, taxumap_X_scaled = taxumap(
# agg_levels,
# True,
# False,
# "euclidean",
# printfigure,
# printwithdiversity,
# X_in,
# tax,
# withusercolors,
# debug=True, #return tables
# save_embedding=True#save xy coordinates
# );
# TAXUMAPSCALEDeuclidean, X_embedded_scaledeuclidean, taxumap_Xscaled_scaledeuclidean, taxumap_X_scaledeuclidean = taxumap(
# agg_levels,
# True,
# False,
# "euclidean",
# printfigure,
# printwithdiversity,
# X_in,
# tax,
# withusercolors,
# debug=True, #return tables
# save_embedding=True#save xy coordinates
# );
LOADPCoAS = False
if LOADPCoAS:
pcoa_embedding_unweighted_unifrac = PCA(n_components=2).fit_transform(unweighted_unifrac.set_index("SampleID"))
#Weighted Unifrac
pcoa_embedding_weighted_unifrac = PCA(n_components=2).fit_transform(weighted_unifrac.set_index("SampleID"))
del unweighted_unifrac
del weighted_unifrac
#del TAXUMAPSCALED, taxumap_Xscaled_scaled, taxumap_X_scaled
#del TAXUMAPSCALEDeuclidean, taxumap_Xscaled_scaledeuclidean, taxumap_X_scaledeuclidean
del TAXUMAP_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels
write_now=False
if write_now:
for (em,n) in zip(
[pcaembedding,
pcoa_embedding_unweighted_unifract[:,0:2],
pcoa_embedding_weighted_unifract,
embedding_non_phylo_unscaled,
X_embedded_alllevels.values,
X_embedded.values],
["pcaembedding",
"pcoa_unweighted_unifrac_embedding",
"pcoa_weighted_unifrac_embedding",
"embedding_nontax_umap_unscaled",
"taxumap_alllevels",
"current_taxumap_embedding"]):
pd.DataFrame(em, index=XASV.index).to_csv("results/%s.csv"%n) | 40.62069 | 150 | 0.574278 | from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, KernelPCA
from umap import UMAP
from sklearn.preprocessing import MinMaxScaler
RUNEMBEDDINGS = False
if RUNEMBEDDINGS:
pcaembedding = PCA(n_components=2).fit_transform(XASV.fillna(0))
kernelpcaembedding = KernelPCA(n_components=2).fit_transform(XASV.fillna(0))
embedding_non_phylo_unscaled = UMAP(n_neighbors=120,min_dist=0.2, metric="manhattan").fit_transform(XASV)
RUNTAXUMAPS = False
if RUNTAXUMAPS:
from taxumap.taxumap import taxumap
agg_levels = ["Phylum", "Family"]
withscaling = False
distanceperlevel = False
distancemetric = "manhattan"
printfigure=False
printwithdiversity=False
X_in = XASV
tax = taxonomy
withusercolors=taxonomy_meta[["HexColor"]]
mbedded_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels = taxumap(["Phylum", "Class", "Order", "Family", "Genus"],
withscaling,
distanceperlevel,
distancemetric,
printfigure,
printwithdiversity,
X_in,
tax,
withusercolors,
debug=True,
save_embedding=False
);
unweighted_unifrac = PCA(n_components=2).fit_transform(unweighted_unifrac.set_index("SampleID"))
pcoa_embedding_weighted_unifrac = PCA(n_components=2).fit_transform(weighted_unifrac.set_index("SampleID"))
del unweighted_unifrac
del weighted_unifrac
del TAXUMAP_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels
write_now=False
if write_now:
for (em,n) in zip(
[pcaembedding,
pcoa_embedding_unweighted_unifract[:,0:2],
pcoa_embedding_weighted_unifract,
embedding_non_phylo_unscaled,
X_embedded_alllevels.values,
X_embedded.values],
["pcaembedding",
"pcoa_unweighted_unifrac_embedding",
"pcoa_weighted_unifrac_embedding",
"embedding_nontax_umap_unscaled",
"taxumap_alllevels",
"current_taxumap_embedding"]):
pd.DataFrame(em, index=XASV.index).to_csv("results/%s.csv"%n) | true | true |
f726e62af700d6cd869103c9f957465198c2bb6d | 218 | py | Python | structurizr/model/enterprise.py | sixty-north/structurizr-python | 856d0476935952c256981f3628663915768ee85e | [
"Apache-2.0"
] | 15 | 2017-07-20T20:43:40.000Z | 2021-11-12T11:25:01.000Z | structurizr/model/enterprise.py | sixty-north/structurizr-python | 856d0476935952c256981f3628663915768ee85e | [
"Apache-2.0"
] | 2 | 2017-06-05T17:41:05.000Z | 2018-09-11T08:18:07.000Z | structurizr/model/enterprise.py | sixty-north/structurizr-python | 856d0476935952c256981f3628663915768ee85e | [
"Apache-2.0"
] | 7 | 2017-08-16T19:51:24.000Z | 2020-09-24T09:47:35.000Z | class Enterprise:
def __init__(self, name):
if len(name.strip()) == 0:
raise ValueError("Name must be specified.")
self._name = name
def get_name(self):
return self._name
| 19.818182 | 55 | 0.577982 | class Enterprise:
def __init__(self, name):
if len(name.strip()) == 0:
raise ValueError("Name must be specified.")
self._name = name
def get_name(self):
return self._name
| true | true |
f726e62b1de4faf4969737dc866dadf797d1e5a6 | 3,616 | py | Python | reminder/admin/forms.py | luk-kop/event-reminder-apscheduler | 405c9731d340d111aac83094a93b06ec60256754 | [
"MIT"
] | 1 | 2021-04-02T11:07:12.000Z | 2021-04-02T11:07:12.000Z | reminder/admin/forms.py | luk-kop/event-reminder-apscheduler | 405c9731d340d111aac83094a93b06ec60256754 | [
"MIT"
] | 2 | 2021-03-20T22:04:50.000Z | 2021-06-09T07:02:36.000Z | reminder/admin/forms.py | luk-kop/event-reminder | 405c9731d340d111aac83094a93b06ec60256754 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, IntegerField
from wtforms.validators import InputRequired, EqualTo, Regexp, Length, NumberRange, Optional, Email
from reminder.custom_wtforms import MxRecordValidator
class NewUserForm(FlaskForm):
"""
Validators for a new user account.
"""
username = StringField(validators=[InputRequired(),
Length(min=3, max=40),
Regexp(regex='^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,39}[a-zA-Z0-9]$',
message='Username should contain chars (min 3): a-z, A-Z, 0-9, . _ -')])
email = StringField(validators=[InputRequired(),
Email(message='Please enter valid email address'),
Length(max=70),
MxRecordValidator()])
role = SelectField(choices=[('user', 'User'), ('admin', 'Admin')])
access = SelectField(label='Can log in?',
choices=[('False', 'No'), ('True', 'Yes')])
pass_reset = SelectField(label='Change password on next login?',
choices=[('False', 'No'), ('True', 'Yes')])
password = PasswordField(validators=[Regexp(regex='^(?=.*[A-Za-z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]'
'{8,40}$',
message='Password must contain minimum 8 characters, at least one '
'letter, one number and one special character')])
password2 = PasswordField(label='Confirm password',
validators=[EqualTo('password')])
class EditUserForm(NewUserForm):
"""
Validators for the user being edited
"""
# the password field can be blank (empty) or match the regex pattern
password = PasswordField(label='Password',
validators=[Regexp(regex='^(?=.*[A-Za-z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]'
'{8,40}$|^$',
message='Password must contain minimum 8 characters, at least one '
'letter, one number and one special character')])
password2 = PasswordField(label='Confirm password', validators=[EqualTo('password')])
class NotifyForm(FlaskForm):
"""
Validators for notification settings
"""
notify_status = StringField(label='Notification status',
validators=[Regexp(regex='^on$'), Optional()])
notify_unit = SelectField('Notification interval time units',
choices=[('hours', 'hours'), ('minutes', 'minutes'), ('seconds', 'seconds')])
notify_interval = IntegerField(label='Notification interval',
validators=[InputRequired(), NumberRange(min=1)])
mail_server = StringField(label='Mail server',
validators=[InputRequired(), Length(max=70)])
mail_port = IntegerField(label='Mail port',
validators=[InputRequired(), NumberRange(min=1)])
mail_security = SelectField(label='Mail security',
choices=[('tls', 'TLS'), ('ssl', 'SSL')])
mail_username = StringField(label='Mail username',
validators=[InputRequired(), Length(max=70)])
mail_password = PasswordField(label='Mail Password') | 56.5 | 118 | 0.518252 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, IntegerField
from wtforms.validators import InputRequired, EqualTo, Regexp, Length, NumberRange, Optional, Email
from reminder.custom_wtforms import MxRecordValidator
class NewUserForm(FlaskForm):
username = StringField(validators=[InputRequired(),
Length(min=3, max=40),
Regexp(regex='^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,39}[a-zA-Z0-9]$',
message='Username should contain chars (min 3): a-z, A-Z, 0-9, . _ -')])
email = StringField(validators=[InputRequired(),
Email(message='Please enter valid email address'),
Length(max=70),
MxRecordValidator()])
role = SelectField(choices=[('user', 'User'), ('admin', 'Admin')])
access = SelectField(label='Can log in?',
choices=[('False', 'No'), ('True', 'Yes')])
pass_reset = SelectField(label='Change password on next login?',
choices=[('False', 'No'), ('True', 'Yes')])
password = PasswordField(validators=[Regexp(regex='^(?=.*[A-Za-z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]'
'{8,40}$',
message='Password must contain minimum 8 characters, at least one '
'letter, one number and one special character')])
password2 = PasswordField(label='Confirm password',
validators=[EqualTo('password')])
class EditUserForm(NewUserForm):
password = PasswordField(label='Password',
validators=[Regexp(regex='^(?=.*[A-Za-z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]'
'{8,40}$|^$',
message='Password must contain minimum 8 characters, at least one '
'letter, one number and one special character')])
password2 = PasswordField(label='Confirm password', validators=[EqualTo('password')])
class NotifyForm(FlaskForm):
notify_status = StringField(label='Notification status',
validators=[Regexp(regex='^on$'), Optional()])
notify_unit = SelectField('Notification interval time units',
choices=[('hours', 'hours'), ('minutes', 'minutes'), ('seconds', 'seconds')])
notify_interval = IntegerField(label='Notification interval',
validators=[InputRequired(), NumberRange(min=1)])
mail_server = StringField(label='Mail server',
validators=[InputRequired(), Length(max=70)])
mail_port = IntegerField(label='Mail port',
validators=[InputRequired(), NumberRange(min=1)])
mail_security = SelectField(label='Mail security',
choices=[('tls', 'TLS'), ('ssl', 'SSL')])
mail_username = StringField(label='Mail username',
validators=[InputRequired(), Length(max=70)])
mail_password = PasswordField(label='Mail Password') | true | true |
f726e725cce6a2546e0dca558dcc54f0ee808e67 | 954 | py | Python | apps/node/src/app/main/users/role.py | AmrMKayid/PyGrid | 695a041649f7cfab6acc7d1495e2a6132f65d529 | [
"Apache-2.0"
] | 7 | 2020-04-20T22:22:08.000Z | 2020-07-25T17:32:08.000Z | apps/node/src/app/main/users/role.py | AmrMKayid/PyGrid | 695a041649f7cfab6acc7d1495e2a6132f65d529 | [
"Apache-2.0"
] | 3 | 2020-04-24T21:20:57.000Z | 2020-05-28T09:17:02.000Z | apps/node/src/app/main/users/role.py | AmrMKayid/PyGrid | 695a041649f7cfab6acc7d1495e2a6132f65d529 | [
"Apache-2.0"
] | 4 | 2020-04-24T22:32:37.000Z | 2020-05-25T19:29:20.000Z | from ... import BaseModel, db
class Role(BaseModel):
__tablename__ = "role"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String())
can_triage_jobs = db.Column(db.Boolean())
can_edit_settings = db.Column(db.Boolean())
can_create_users = db.Column(db.Boolean())
can_create_groups = db.Column(db.Boolean())
can_edit_roles = db.Column(db.Boolean())
can_manage_infrastructure = db.Column(db.Boolean())
def __str__(self):
return (
f"<Role id: {self.id}, name: {self.name}, "
f"can_triage_jobs: {self.can_triage_jobs}, "
f"can_edit_settings: {self.can_edit_settings}, "
f"can_create_users: {self.can_create_users}, "
f"can_create_groups: {self.can_create_groups}, "
f"can_edit_roles: {self.can_edit_roles}, "
f"can_manage_infrastructure: {self.can_manage_infrastructure}>"
)
| 36.692308 | 75 | 0.645702 | from ... import BaseModel, db
class Role(BaseModel):
__tablename__ = "role"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String())
can_triage_jobs = db.Column(db.Boolean())
can_edit_settings = db.Column(db.Boolean())
can_create_users = db.Column(db.Boolean())
can_create_groups = db.Column(db.Boolean())
can_edit_roles = db.Column(db.Boolean())
can_manage_infrastructure = db.Column(db.Boolean())
def __str__(self):
return (
f"<Role id: {self.id}, name: {self.name}, "
f"can_triage_jobs: {self.can_triage_jobs}, "
f"can_edit_settings: {self.can_edit_settings}, "
f"can_create_users: {self.can_create_users}, "
f"can_create_groups: {self.can_create_groups}, "
f"can_edit_roles: {self.can_edit_roles}, "
f"can_manage_infrastructure: {self.can_manage_infrastructure}>"
)
| true | true |
f726e78d6350d5f990597a123318cd9d4a4c9fb9 | 2,497 | py | Python | axol_node/plugins/resources/resource_get_all_roles.py | kelceydamage/axol | b5288577ee769bcd609c361cb0ac5e2a678289da | [
"Apache-2.0"
] | null | null | null | axol_node/plugins/resources/resource_get_all_roles.py | kelceydamage/axol | b5288577ee769bcd609c361cb0ac5e2a678289da | [
"Apache-2.0"
] | null | null | null | axol_node/plugins/resources/resource_get_all_roles.py | kelceydamage/axol | b5288577ee769bcd609c361cb0ac5e2a678289da | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
#-----------------------------------------#
#Copyright [2015] [Kelcey Jamison-Damage]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# Imports
#-----------------------------------------------------------------------#
from axol_common.classes.common_logger import CommonLogger
from axol_common.classes.common_data_object import GenericDataObject
from axol_common.classes.common_resource import CommonResource
from axol_common.distributed.axol_roledefs import generate_base_roles
from classes.axol_resource import AxolResource
from axol_config import api
from aapi.aapi import app as app
from flask import jsonify
from flask import request
class ResourceGetAllRoles(AxolResource):
"""docstring for ResourceGetAllRoles
Must implement:
_show_help
self.methods = {<method_type>: function}
self.source = {keyword}
request_{keyword}_api
calculate_new_fields
"""
required_post = {
'network': (True, u's'),
'profile': (False, u's')
}
def __init__(self):
super(ResourceGetAllRoles, self).__init__()
self.source = 'get_all_roles'
self.local = True
def _show_help(self):
return {
'Help': {
'api': '/api/get_all_roles',
'method': 'POST',
'required data': {
'network': '<internal, external>'
},
'version': api
}
}
@staticmethod
@app.route('/api/get_all_roles', methods=['POST', 'GET'])
def api_get_all_roles():
if request.method == 'GET':
return jsonify(ResourceGetAllRoles()._show_help())
try:
data = CommonResource.handle_request(request, ResourceGetAllRoles.required_post)
except Exception, e:
CommonLogger.log(e, 'get_all_roles', 'api_get_all_roles')
return jsonify({'response': {'error': str(e)}})
try:
roledefs = generate_base_roles(data.network)
except Exception, e:
CommonLogger.log(e, 'get_all_roles', 'api_get_all_roles')
return jsonify({'response': {'error': str(e)}})
return jsonify({'response': roledefs})
| 32.855263 | 84 | 0.682819 |
rom axol_common.classes.common_logger import CommonLogger
from axol_common.classes.common_data_object import GenericDataObject
from axol_common.classes.common_resource import CommonResource
from axol_common.distributed.axol_roledefs import generate_base_roles
from classes.axol_resource import AxolResource
from axol_config import api
from aapi.aapi import app as app
from flask import jsonify
from flask import request
class ResourceGetAllRoles(AxolResource):
"""docstring for ResourceGetAllRoles
Must implement:
_show_help
self.methods = {<method_type>: function}
self.source = {keyword}
request_{keyword}_api
calculate_new_fields
"""
required_post = {
'network': (True, u's'),
'profile': (False, u's')
}
def __init__(self):
super(ResourceGetAllRoles, self).__init__()
self.source = 'get_all_roles'
self.local = True
def _show_help(self):
return {
'Help': {
'api': '/api/get_all_roles',
'method': 'POST',
'required data': {
'network': '<internal, external>'
},
'version': api
}
}
@staticmethod
@app.route('/api/get_all_roles', methods=['POST', 'GET'])
def api_get_all_roles():
if request.method == 'GET':
return jsonify(ResourceGetAllRoles()._show_help())
try:
data = CommonResource.handle_request(request, ResourceGetAllRoles.required_post)
except Exception, e:
CommonLogger.log(e, 'get_all_roles', 'api_get_all_roles')
return jsonify({'response': {'error': str(e)}})
try:
roledefs = generate_base_roles(data.network)
except Exception, e:
CommonLogger.log(e, 'get_all_roles', 'api_get_all_roles')
return jsonify({'response': {'error': str(e)}})
return jsonify({'response': roledefs})
| false | true |
f726e80aacceea27942a112dde7b312235a8f554 | 35,185 | py | Python | sdks/python/apache_beam/typehints/decorators.py | VrishaliShah/beam | c27f5f724e38fbec829d9cf8920fac2bdedb7ca4 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/typehints/decorators.py | VrishaliShah/beam | c27f5f724e38fbec829d9cf8920fac2bdedb7ca4 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:17:07.000Z | 2022-02-10T04:23:10.000Z | sdks/python/apache_beam/typehints/decorators.py | VrishaliShah/beam | c27f5f724e38fbec829d9cf8920fac2bdedb7ca4 | [
"Apache-2.0"
] | 1 | 2020-01-16T17:00:26.000Z | 2020-01-16T17:00:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Type hinting decorators allowing static or runtime type-checking for the SDK.
This module defines decorators which utilize the type-hints defined in
'type_hints.py' to allow annotation of the types of function arguments and
return values.
Type-hints for functions are annotated using two separate decorators. One is for
type-hinting the types of function arguments, the other for type-hinting the
function return value. Type-hints can either be specified in the form of
positional arguments::
@with_input_types(int, int)
def add(a, b):
return a + b
Keyword arguments::
@with_input_types(a=int, b=int)
def add(a, b):
return a + b
Or even a mix of both::
@with_input_types(int, b=int)
def add(a, b):
return a + b
Example usage for type-hinting arguments only::
@with_input_types(s=str)
def to_lower(a):
return a.lower()
Example usage for type-hinting return values only::
@with_output_types(Tuple[int, bool])
def compress_point(ec_point):
return ec_point.x, ec_point.y < 0
Example usage for type-hinting both arguments and return values::
@with_input_types(a=int)
@with_output_types(str)
def int_to_str(a):
return str(a)
Type-hinting a function with arguments that unpack tuples is also supported
(in Python 2 only). As an example, such a function would be defined as::
def foo((a, b)):
...
The valid type-hint for such as function looks like the following::
@with_input_types(a=int, b=int)
def foo((a, b)):
...
Notice that we hint the type of each unpacked argument independently, rather
than hinting the type of the tuple as a whole (Tuple[int, int]).
Optionally, type-hints can be type-checked at runtime. To toggle this behavior
this module defines two functions: 'enable_run_time_type_checking' and
'disable_run_time_type_checking'. NOTE: for this toggle behavior to work
properly it must appear at the top of the module where all functions are
defined, or before importing a module containing type-hinted functions.
"""
# pytype: skip-file
from __future__ import absolute_import
import inspect
import itertools
import logging
import sys
import traceback
import types
from builtins import next
from builtins import object
from builtins import zip
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing import TypeVar
from apache_beam.typehints import native_type_compatibility
from apache_beam.typehints import typehints
from apache_beam.typehints.native_type_compatibility import convert_to_beam_type
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
from apache_beam.typehints.typehints import check_constraint
from apache_beam.typehints.typehints import validate_composite_type_param
try:
import funcsigs # Python 2 only.
except ImportError:
funcsigs = None
__all__ = [
'no_annotations',
'with_input_types',
'with_output_types',
'WithTypeHints',
'TypeCheckError',
]
T = TypeVar('T')
WithTypeHintsT = TypeVar('WithTypeHintsT', bound='WithTypeHints')  # pylint: disable=invalid-name

# This is missing in the builtin types module. str.upper is arbitrary, any
# method on a C-implemented type will do.
# pylint: disable=invalid-name
_MethodDescriptorType = type(str.upper)
# pylint: enable=invalid-name

# Catch-all hints assigned to un-annotated *args and **kwargs parameters.
_ANY_VAR_POSITIONAL = typehints.Tuple[typehints.Any, ...]
_ANY_VAR_KEYWORD = typehints.Dict[typehints.Any, typehints.Any]

# TODO(BEAM-8280): Remove this when from_callable is ready to be enabled.
_enable_from_callable = False

# inspect.getfullargspec is Python 3 only; on Python 2 fall back to
# inspect.getargspec and remember which variant is in use so callers can
# build the matching (Full)ArgSpec type.
try:
  _original_getfullargspec = inspect.getfullargspec
  _use_full_argspec = True
except AttributeError:  # Python 2
  _original_getfullargspec = inspect.getargspec  # type: ignore
  _use_full_argspec = False
def getfullargspec(func):
  """Python 2 replacement for inspect.getargspec that tolerates builtins.

  Falls back to inspecting ``__init__`` for classes (dropping 'self') and
  ``__call__`` for callable objects.  Callables that cannot be inspected at
  all get a catch-all spec whose '__unknown__'-prefixed varargs/keywords are
  ignored by the type-checking code.

  Python 3 callers should use get_signature() instead.
  """
  assert sys.version_info < (3, ), 'This method should not be used in Python 3'
  try:
    return _original_getfullargspec(func)
  except TypeError:
    if isinstance(func, type):
      # Classes: inspect the constructor and drop the leading 'self'.
      argspec = getfullargspec(func.__init__)
      del argspec.args[0]
      return argspec
    if not callable(func):
      raise
    try:
      return _original_getfullargspec(func.__call__)
    except TypeError:
      # Catch-all spec: at least one positional argument, and any number of
      # other (positional or keyword) arguments whose name won't match any
      # real argument.  The '__unknown__' prefix marks them for the type
      # checker to skip.
      if _use_full_argspec:
        return inspect.FullArgSpec(['_'],
                                   '__unknown__varargs',
                                   '__unknown__keywords', (), [], {}, {})
      # Python 2
      return inspect.ArgSpec(['_'],
                             '__unknown__varargs',
                             '__unknown__keywords', ())
def get_signature(func):
  """Like inspect.signature(), but supports Py2 as well.

  This module uses inspect.signature instead of getfullargspec since in the
  latter: 'the "self" parameter is always reported, even for bound methods'
  https://github.com/python/cpython/blob/44f91c388a6f4da9ed3300df32ca290b8aa104ea/Lib/inspect.py#L1103
  """
  # Prefer inspect.signature; fall back on the funcsigs backport on Python 2,
  # where inspect has no 'signature' attribute.
  inspect_ = inspect if hasattr(inspect, 'signature') else funcsigs
  try:
    signature = inspect_.signature(func)
  except ValueError:
    # Uninspectable callables get a catch-all signature: one positional
    # argument plus arbitrary varargs and keywords.
    signature = inspect_.Signature([
        inspect_.Parameter('_', inspect_.Parameter.POSITIONAL_OR_KEYWORD),
        inspect_.Parameter(
            '__unknown__varargs', inspect_.Parameter.VAR_POSITIONAL),
        inspect_.Parameter(
            '__unknown__keywords', inspect_.Parameter.VAR_KEYWORD),
    ])

  if isinstance(func, _MethodDescriptorType):
    # Specialization to hint the first argument of certain builtins, such as
    # str.strip, with the class that defines them.
    params = list(signature.parameters.values())
    if params[0].annotation == params[0].empty:
      params[0] = params[0].replace(annotation=func.__objclass__)
    signature = signature.replace(parameters=params)

  if (signature.return_annotation == signature.empty and
      isinstance(func, type)):
    # Specialization to hint the return value of type callables (calling a
    # class produces an instance of that class).
    signature = signature.replace(return_annotation=typehints.normalize(func))

  return signature
def no_annotations(fn):
  """Marks a callable so that Beam ignores its type-hint annotations.

  Usable as a decorator; returns the callable unchanged apart from the
  added ``_beam_no_annotations`` marker attribute.
  """
  fn._beam_no_annotations = True
  return fn
class IOTypeHints(NamedTuple(
    'IOTypeHints',
    [('input_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
     ('output_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
     ('origin', List[str])])):
  """Encapsulates all type hint information about a Dataflow construct.

  This should primarily be used via the WithTypeHints mixin class, though
  may also be attached to other objects (such as Python functions).

  Attributes:
    input_types: (tuple, dict) List of typing types, and an optional dictionary.
      May be None. The list and dict correspond to args and kwargs.
    output_types: (tuple, dict) List of typing types, and an optional dictionary
      (unused). Only the first element of the list is used. May be None.
    origin: (List[str]) Stack of tracebacks of method calls used to create this
      instance.
  """
  # Maximum number of stack frames recorded per origin entry.
  traceback_limit = 5

  @classmethod
  def _make_origin(cls, bases, tb=True, msg=()):
    # type: (List[IOTypeHints], bool, List[str]) -> List[str]
    """Assembles an origin list for a new instance.

    Args:
      bases: Existing instances whose origins are appended (indented).
      tb: Whether to record the current stack trace.
      msg: Lines describing the operation creating the new instance.
    """
    if msg:
      res = msg
    else:
      res = []
    if tb:
      # Omit this method and the IOTypeHints method that called it.
      num_frames_skip = 2
      tb = traceback.format_stack(limit=cls.traceback_limit +
                                  num_frames_skip)[:-num_frames_skip]
      # tb is a list of strings in the form of 'File ...\n[code]\n'. Split into
      # single lines and flatten.
      res += list(
          itertools.chain.from_iterable(s.strip().split('\n') for s in tb))
    bases = [base for base in bases if base.origin]
    if bases:
      res += ['', 'based on:']
      for i, base in enumerate(bases):
        if i > 0:
          res += ['', 'and:']
        res += [' ' + str(base)]
        res += [' ' + s for s in base.origin]
    return res

  @classmethod
  def empty(cls):
    # type: () -> IOTypeHints
    """Construct a base IOTypeHints object with no hints."""
    return IOTypeHints(None, None, [])

  @classmethod
  def from_callable(cls, fn):
    # type: (Callable) -> Optional[IOTypeHints]
    """Construct an IOTypeHints object from a callable's signature.

    Supports Python 3 annotations. For partial annotations, sets unknown types
    to Any, _ANY_VAR_POSITIONAL, or _ANY_VAR_KEYWORD.

    Returns:
      A new IOTypeHints or None if no annotations found.
    """
    if not _enable_from_callable or getattr(fn, '_beam_no_annotations', False):
      return None
    signature = get_signature(fn)
    if (all(param.annotation == param.empty
            for param in signature.parameters.values()) and
        signature.return_annotation == signature.empty):
      # Nothing annotated at all: no hints to derive.
      return None
    input_args = []
    input_kwargs = {}
    for param in signature.parameters.values():
      if param.annotation == param.empty:
        # Unannotated parameters get the most permissive hint for their kind.
        if param.kind == param.VAR_POSITIONAL:
          input_args.append(_ANY_VAR_POSITIONAL)
        elif param.kind == param.VAR_KEYWORD:
          input_kwargs[param.name] = _ANY_VAR_KEYWORD
        elif param.kind == param.KEYWORD_ONLY:
          input_kwargs[param.name] = typehints.Any
        else:
          input_args.append(typehints.Any)
      else:
        if param.kind in [param.KEYWORD_ONLY, param.VAR_KEYWORD]:
          input_kwargs[param.name] = convert_to_beam_type(param.annotation)
        else:
          assert param.kind in [param.POSITIONAL_ONLY,
                                param.POSITIONAL_OR_KEYWORD,
                                param.VAR_POSITIONAL], \
              'Unsupported Parameter kind: %s' % param.kind
          input_args.append(convert_to_beam_type(param.annotation))
    output_args = []
    if signature.return_annotation != signature.empty:
      output_args.append(convert_to_beam_type(signature.return_annotation))
    else:
      output_args.append(typehints.Any)
    name = getattr(fn, '__name__', '<unknown>')
    msg = ['from_callable(%s)' % name, ' signature: %s' % signature]
    if hasattr(fn, '__code__'):
      msg.append(
          ' File "%s", line %d' %
          (fn.__code__.co_filename, fn.__code__.co_firstlineno))
    return IOTypeHints(
        input_types=(tuple(input_args), input_kwargs),
        output_types=(tuple(output_args), {}),
        origin=cls._make_origin([], tb=False, msg=msg))

  def with_input_types(self, *args, **kwargs):
    # type: (...) -> IOTypeHints
    """Returns a copy of this instance with the given input type hints."""
    return self._replace(
        input_types=(args, kwargs), origin=self._make_origin([self]))

  def with_output_types(self, *args, **kwargs):
    # type: (...) -> IOTypeHints
    """Returns a copy of this instance with the given output type hints."""
    return self._replace(
        output_types=(args, kwargs), origin=self._make_origin([self]))

  def simple_output_type(self, context):
    """Returns the single positional output hint, or None if none is set.

    Raises:
      TypeError: If there is more than one positional output hint, or any
        keyword output hints.
    """
    if self._has_output_types():
      args, kwargs = self.output_types
      if len(args) != 1 or kwargs:
        raise TypeError(
            'Expected single output type hint for %s but got: %s' %
            (context, self.output_types))
      return args[0]

  def has_simple_output_type(self):
    """Whether there's a single positional output type."""
    return (
        self.output_types and len(self.output_types[0]) == 1 and
        not self.output_types[1])

  def strip_iterable(self):
    # type: () -> IOTypeHints
    """Removes outer Iterable (or equivalent) from output type.

    Only affects instances with simple output types, otherwise is a no-op.
    Does not modify self.

    Designed to be used with type hints from callables of ParDo, FlatMap, DoFn.
    Output type may be Optional[T], in which case the result of stripping T is
    used as the output type.
    Output type may be None/NoneType, in which case nothing is done.

    Example: Generator[Tuple(int, int)] becomes Tuple(int, int)

    Returns:
      A copy of this instance with a possibly different output type.

    Raises:
      ValueError if output type is simple and not iterable.
    """
    if self.output_types is None or not self.has_simple_output_type():
      return self
    output_type = self.output_types[0][0]
    if output_type is None or isinstance(output_type, type(None)):
      return self
    # If output_type == Optional[T]: output_type = T.
    if isinstance(output_type, typehints.UnionConstraint):
      types = list(output_type.union_types)
      if len(types) == 2:
        try:
          types.remove(type(None))
          output_type = types[0]
        except ValueError:
          pass
    yielded_type = typehints.get_yielded_type(output_type)
    return self._replace(
        output_types=((yielded_type, ), {}),
        origin=self._make_origin([self], tb=False, msg=['strip_iterable()']))

  def with_defaults(self, hints):
    # type: (Optional[IOTypeHints]) -> IOTypeHints
    """Returns a copy whose unset input/output types are filled from hints."""
    if not hints:
      return self
    if not self:
      return hints
    if self._has_input_types():
      input_types = self.input_types
    else:
      input_types = hints.input_types
    if self._has_output_types():
      output_types = self.output_types
    else:
      output_types = hints.output_types
    res = IOTypeHints(
        input_types,
        output_types,
        self._make_origin([self, hints], tb=False, msg=['with_defaults()']))
    if res == self:
      return self  # Don't needlessly increase origin traceback length.
    else:
      return res

  def _has_input_types(self):
    # Non-None and at least one of the (args, kwargs) pair is non-empty.
    return self.input_types is not None and any(self.input_types)

  def _has_output_types(self):
    return self.output_types is not None and any(self.output_types)

  def __bool__(self):
    # Truthy iff any input or output hints are set.
    return self._has_input_types() or self._has_output_types()

  def __repr__(self):
    return 'IOTypeHints[inputs=%s, outputs=%s]' % (
        self.input_types, self.output_types)

  def debug_str(self):
    """Returns repr plus the origin trace, one entry per line."""
    return '\n'.join([self.__repr__()] + self.origin)

  def __eq__(self, other):
    # Unset (None) hints and present-but-empty hints compare equal; origin is
    # deliberately excluded from equality.
    if not isinstance(other, IOTypeHints):
      # Fix: previously, comparing against a non-IOTypeHints value raised
      # AttributeError on other.input_types; defer to the other operand.
      return NotImplemented

    def same(a, b):
      if a is None or not any(a):
        return b is None or not any(b)
      else:
        return a == b

    return (
        same(self.input_types, other.input_types) and
        same(self.output_types, other.output_types))

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(str(self))

  def __reduce__(self):
    # Don't include "origin" debug information in pickled form.
    return (IOTypeHints, (self.input_types, self.output_types, []))
class WithTypeHints(object):
  """A mixin class that provides the ability to set and retrieve type hints.
  """
  def __init__(self, *unused_args, **unused_kwargs):
    self._type_hints = IOTypeHints.empty()

  def _get_or_create_type_hints(self):
    # type: () -> IOTypeHints
    """Returns this instance's hints, creating an empty set if needed."""
    # __init__ may not have been called, and class-level hints must never be
    # returned through an instance (see BEAM-8629), so consult the instance
    # __dict__ directly.
    if '_type_hints' not in self.__dict__:
      self._type_hints = IOTypeHints.empty()
    return self.__dict__['_type_hints']

  def get_type_hints(self):
    """Gets and/or initializes type hints for this object.

    If type hints have not been set, attempts to initialize type hints in this
    order:
    - Using self.default_type_hints().
    - Using self.__class__ type hints.
    """
    hints = self._get_or_create_type_hints()
    hints = hints.with_defaults(self.default_type_hints())
    return hints.with_defaults(get_type_hints(self.__class__))

  def default_type_hints(self):
    # Subclasses may override to supply hints when none were set explicitly.
    return None

  def with_input_types(self, *arg_hints, **kwarg_hints):
    # type: (WithTypeHintsT, *Any, **Any) -> WithTypeHintsT
    """Records the given input type hints on this object; returns self."""
    converted_args = native_type_compatibility.convert_to_beam_types(arg_hints)
    converted_kwargs = native_type_compatibility.convert_to_beam_types(
        kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_input_types(
        *converted_args, **converted_kwargs)
    return self

  def with_output_types(self, *arg_hints, **kwarg_hints):
    # type: (WithTypeHintsT, *Any, **Any) -> WithTypeHintsT
    """Records the given output type hints on this object; returns self."""
    converted_args = native_type_compatibility.convert_to_beam_types(arg_hints)
    converted_kwargs = native_type_compatibility.convert_to_beam_types(
        kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_output_types(
        *converted_args, **converted_kwargs)
    return self
class TypeCheckError(Exception):
  """Raised when a type hint is violated or cannot be applied.

  Used both for runtime values that fail their declared hints (see
  _check_instance_type) and for malformed hint declarations (see the
  _normalize_var_* helpers).
  """
  pass
def _positional_arg_hints(arg, hints):
  """Returns the type of a (possibly tuple-packed) positional argument.

  E.g. for lambda ((a, b), c): None the single positional argument is (as
  returned by inspect) [[a, b], c] which should have type
  Tuple[Tuple[Int, Any], float] when applied to the type hints
  {a: int, b: Any, c: float}.
  """
  if not isinstance(arg, list):
    # Plain argument name: look up its hint, defaulting to Any.
    return hints.get(arg, typehints.Any)
  # Tuple-packed argument: resolve each member recursively.
  member_hints = [_positional_arg_hints(member, hints) for member in arg]
  return typehints.Tuple[member_hints]
def _unpack_positional_arg_hints(arg, hint):
  """Unpacks the given hint according to the nested structure of arg.

  For example, if arg is [[a, b], c] and hint is Tuple[Any, int], then
  this function would return ((Any, Any), int) so it can be used in conjunction
  with inspect.getcallargs.
  """
  if not isinstance(arg, list):
    # Plain argument: the hint applies as-is.
    return hint
  expected_shape = typehints.Tuple[[typehints.Any] * len(arg)]
  if not typehints.is_consistent_with(hint, expected_shape):
    raise TypeCheckError(
        'Bad tuple arguments for %s: expected %s, got %s' %
        (arg, expected_shape, hint))
  if isinstance(hint, typehints.TupleConstraint):
    # Recurse member-wise through the tuple structure.
    return tuple(
        _unpack_positional_arg_hints(sub_arg, sub_hint)
        for sub_arg, sub_hint in zip(arg, hint.tuple_types))
  # Consistent but not an explicit Tuple (e.g. Any): expand to Any per member.
  return (typehints.Any, ) * len(arg)
def getcallargs_forhints(func, *typeargs, **typekwargs):
  """Like inspect.getcallargs, with support for declaring default args as Any.

  In Python 2, understands that Tuple[] and an Any unpack.

  Returns:
    (Dict[str, Any]) A dictionary from arguments names to values.
  """
  # Dispatch on the running interpreter's major version.
  if sys.version_info[0] >= 3:
    return getcallargs_forhints_impl_py3(func, typeargs, typekwargs)
  return getcallargs_forhints_impl_py2(func, typeargs, typekwargs)
def getcallargs_forhints_impl_py2(func, typeargs, typekwargs):
  """Python 2 implementation of getcallargs_forhints.

  Binds the given positional/keyword type hints to func's parameters via
  inspect.getcallargs, unpacking Tuple hints across tuple-packed arguments
  and declaring defaulted/variadic parameters as Any (or variadic Any).

  Returns:
    (Dict[str, Any]) A dictionary from argument names to type hints.
  """
  argspec = getfullargspec(func)
  # Turn Tuple[x, y] into (x, y) so getcallargs can do the proper unpacking.
  packed_typeargs = [
      _unpack_positional_arg_hints(arg, hint)
      for (arg, hint) in zip(argspec.args, typeargs)
  ]
  # Keep any surplus positional hints (e.g. destined for *varargs) untouched.
  packed_typeargs += list(typeargs[len(packed_typeargs):])
  # Monkeypatch inspect.getfullargspec to allow passing non-function objects.
  # getfullargspec (getargspec on Python 2) are used by inspect.getcallargs.
  # TODO(BEAM-5490): Reimplement getcallargs and stop relying on monkeypatch.
  inspect.getargspec = getfullargspec
  try:
    callargs = inspect.getcallargs(func, *packed_typeargs, **typekwargs)  # pylint: disable=deprecated-method
  except TypeError as e:
    # Too few/too many arguments; surface as a type-check failure.
    raise TypeCheckError(e)
  finally:
    # Revert monkey-patch.
    inspect.getargspec = _original_getfullargspec
  if argspec.defaults:
    # Declare any default arguments to be Any.
    for k, var in enumerate(reversed(argspec.args)):
      if k >= len(argspec.defaults):
        break
      # Identity check: only hint-less params still holding their default.
      if callargs.get(var, None) is argspec.defaults[-k - 1]:
        callargs[var] = typehints.Any
  # Patch up varargs and keywords
  if argspec.varargs:
    # TODO(BEAM-8122): This will always assign _ANY_VAR_POSITIONAL. Should be
    # "callargs.get(...) or _ANY_VAR_POSITIONAL".
    callargs[argspec.varargs] = typekwargs.get(
        argspec.varargs, _ANY_VAR_POSITIONAL)
  varkw = argspec.keywords
  if varkw:
    # TODO(robertwb): Consider taking the union of key and value types.
    callargs[varkw] = typekwargs.get(varkw, _ANY_VAR_KEYWORD)
  # TODO(BEAM-5878) Support kwonlyargs.
  return callargs
def _normalize_var_positional_hint(hint):
  """Converts a var_positional hint into Tuple[Union[<types>], ...] form.

  Args:
    hint: (tuple) Should be either a tuple of one or more types, or a single
      Tuple[<type>, ...].

  Raises:
    TypeCheckError if hint does not have the right form.
  """
  if not hint or type(hint) != tuple:
    raise TypeCheckError('Unexpected VAR_POSITIONAL value: %s' % hint)
  is_single_packed_tuple = (
      len(hint) == 1 and isinstance(hint[0], typehints.TupleSequenceConstraint))
  if is_single_packed_tuple:
    # Example: tuple(Tuple[Any, ...]) -> Tuple[Any, ...]
    return hint[0]
  # Example: tuple(int, str) -> Tuple[Union[int, str], ...]
  return typehints.Tuple[typehints.Union[hint], ...]
def _normalize_var_keyword_hint(hint, arg_name):
  """Converts a var_keyword hint into Dict[<key type>, <value type>] form.

  Args:
    hint: (dict) Should either contain a pair (arg_name,
      Dict[<key type>, <value type>]), or one or more possible types for the
      value.
    arg_name: (str) The keyword receiving this hint.

  Raises:
    TypeCheckError if hint does not have the right form.
  """
  if not hint or type(hint) != dict:
    raise TypeCheckError('Unexpected VAR_KEYWORD value: %s' % hint)
  keys = list(hint.keys())
  values = list(hint.values())
  is_single_packed_dict = (
      len(values) == 1 and keys[0] == arg_name and
      isinstance(values[0], typehints.DictConstraint))
  if is_single_packed_dict:
    # Example: dict(kwargs=Dict[str, Any]) -> Dict[str, Any]
    return values[0]
  # Example: dict(k1=str, k2=int) -> Dict[str, Union[str,int]]
  return typehints.Dict[str, typehints.Union[values]]
def getcallargs_forhints_impl_py3(func, type_args, type_kwargs):
  """Bind type_args and type_kwargs to func.

  Works like inspect.getcallargs, with some modifications to support type hint
  checks.
  For unbound args, will use annotations and fall back to Any (or variants of
  Any).

  Returns:
    A mapping from parameter name to argument.
  """
  try:
    signature = get_signature(func)
  except ValueError as e:
    logging.warning('Could not get signature for function: %s: %s', func, e)
    return {}
  try:
    bindings = signature.bind(*type_args, **type_kwargs)
  except TypeError as e:
    # Might be raised due to too few or too many arguments.
    raise TypeCheckError(e)
  bound_args = bindings.arguments
  for param in signature.parameters.values():
    name = param.name
    kind = param.kind
    if name in bound_args:
      # Bound: variadic collections are converted to a single type hint.
      if kind == param.VAR_POSITIONAL:
        bound_args[name] = _normalize_var_positional_hint(bound_args[name])
      elif kind == param.VAR_KEYWORD:
        bound_args[name] = _normalize_var_keyword_hint(bound_args[name], name)
      continue
    # Unbound: must have a default or be variadic.
    if param.annotation != param.empty:
      bound_args[name] = param.annotation
    elif kind == param.VAR_POSITIONAL:
      bound_args[name] = _ANY_VAR_POSITIONAL
    elif kind == param.VAR_KEYWORD:
      bound_args[name] = _ANY_VAR_KEYWORD
    elif param.default is not param.empty:
      # Declare unbound parameters with defaults to be Any.
      bound_args[name] = typehints.Any
    else:
      # This case should be caught by signature.bind() above.
      raise ValueError('Unexpected unbound parameter: %s' % name)
  return dict(bound_args)
def get_type_hints(fn):
  # type: (Any) -> IOTypeHints
  """Gets the type hint associated with an arbitrary object fn.

  Always returns a valid IOTypeHints object, creating one if necessary.
  """
  # pylint: disable=protected-access
  if hasattr(fn, '_type_hints'):
    return fn._type_hints
  try:
    fn._type_hints = IOTypeHints.empty()
  except (AttributeError, TypeError):
    # fn does not accept arbitrary attributes; compute hints without caching.
    hints = IOTypeHints.empty()
    # Python 3.7 introduces annotations for _MethodDescriptorTypes.
    if isinstance(fn, _MethodDescriptorType) and sys.version_info < (3, 7):
      hints = hints.with_input_types(fn.__objclass__)  # type: ignore
    return hints
  return fn._type_hints
  # pylint: enable=protected-access
def with_input_types(*positional_hints, **keyword_hints):
  # type: (*Any, **Any) -> Callable[[T], T]
  """A decorator that type-checks defined type-hints with passed func arguments.

  Hints may be given positionally (in the same order as the function's formal
  arguments), by keyword (mirroring parameter names), or as a mix of both.
  If even one parameter is type-hinted, all of them must be.

  Once fully decorated, if the arguments passed to the resulting function
  violate the type-hint constraints defined, a :class:`TypeCheckError`
  detailing the error will be raised.

  To be used as:

  .. testcode::

    from apache_beam.typehints import with_input_types

    @with_input_types(str)
    def upper(s):
      return s.upper()

  Args:
    *positional_hints: Positional type-hints having identical order as the
      function's formal arguments. Values must either be a built-in Python
      type or an instance of a
      :class:`~apache_beam.typehints.typehints.TypeConstraint` created by
      'indexing' a
      :class:`~apache_beam.typehints.typehints.CompositeTypeHint` instance
      with a type parameter.
    **keyword_hints: Keyword arguments mirroring the names of the parameters
      to the decorated functions, with the same allowed values as positional
      hints.

  Raises:
    :class:`ValueError`: If not all function arguments have
      corresponding type-hints specified, or if the inner wrapper function
      isn't passed a function object.
    :class:`TypeCheckError`: If any of the passed type-hint
      constraints are not a type or
      :class:`~apache_beam.typehints.typehints.TypeConstraint` instance.

  Returns:
    The original function decorated such that it enforces type-hint constraints
    for all received function arguments.
  """
  converted_positional_hints = (
      native_type_compatibility.convert_to_beam_types(positional_hints))
  converted_keyword_hints = (
      native_type_compatibility.convert_to_beam_types(keyword_hints))
  # Drop the unconverted originals so they cannot be used by mistake below.
  del positional_hints
  del keyword_hints

  def annotate_input_types(f):
    if isinstance(f, types.FunctionType):
      # Only plain functions get eager validation of the hint values.
      for hint in itertools.chain(converted_positional_hints,
                                  converted_keyword_hints.values()):
        validate_composite_type_param(
            hint, error_msg_prefix='All type hint arguments')

    existing_hints = getattr(f, '_type_hints', IOTypeHints.empty())
    f._type_hints = existing_hints.with_input_types(  # pylint: disable=protected-access
        *converted_positional_hints, **converted_keyword_hints)
    return f

  return annotate_input_types
def with_output_types(*return_type_hint, **kwargs):
  # type: (*Any, **Any) -> Callable[[T], T]
  """A decorator that type-checks defined type-hints for return values(s).

  This decorator will type-check the return value(s) of the decorated function.
  Exactly one type-hint may be supplied for the return value; a function
  returning several values should be hinted with a single
  ``Tuple[type_1, type_2]``.

  If the ultimate return value for the function violates the specified type-hint
  a :class:`TypeCheckError` will be raised detailing the type-constraint
  violation.

  This decorator is intended to be used like:

  .. testcode::

    from apache_beam.typehints import with_output_types

    @with_output_types(bool)
    def negate(p):
      return not p if p else p

  Args:
    *return_type_hint: A type-hint specifying the proper return type of the
      function. This argument should either be a built-in Python type or an
      instance of a :class:`~apache_beam.typehints.typehints.TypeConstraint`
      created by 'indexing' a
      :class:`~apache_beam.typehints.typehints.CompositeTypeHint`.
    **kwargs: Not used; passing any keyword argument is an error.

  Raises:
    :class:`ValueError`: If any kwarg parameters are passed in,
      or the length of **return_type_hint** is greater than ``1``. Or if the
      inner wrapper function isn't passed a function object.
    :class:`TypeCheckError`: If the **return_type_hint** object is
      in invalid type-hint.

  Returns:
    The original function decorated such that it enforces type-hint constraints
    for all return values.
  """
  if kwargs:
    raise ValueError(
        "All arguments for the 'returns' decorator must be "
        "positional arguments.")
  if len(return_type_hint) != 1:
    raise ValueError(
        "'returns' accepts only a single positional argument. In "
        "order to specify multiple return types, use the 'Tuple' "
        "type-hint.")
  converted_hint = native_type_compatibility.convert_to_beam_type(
      return_type_hint[0])
  validate_composite_type_param(
      converted_hint, error_msg_prefix='All type hint arguments')

  def annotate_output_types(f):
    existing_hints = getattr(f, '_type_hints', IOTypeHints.empty())
    f._type_hints = existing_hints.with_output_types(converted_hint)  # pylint: disable=protected-access
    return f

  return annotate_output_types
def _check_instance_type(
    type_constraint, instance, var_name=None, verbose=False):
  """A helper function to report type-hint constraint violations.

  Args:
    type_constraint: An instance of a 'TypeConstraint' or a built-in Python
      type.
    instance: The candidate object which will be checked by to satisfy
      'type_constraint'.
    var_name: If 'instance' is an argument, then the actual name for the
      parameter in the original function definition.
    verbose: Whether to include the offending value itself in the error
      message.

  Raises:
    TypeCheckError: If 'instance' fails to meet the type-constraint of
      'type_constraint'.
  """
  if var_name is not None:
    hint_type = "argument: '%s'" % var_name
  else:
    hint_type = 'return type'
  try:
    check_constraint(type_constraint, instance)
  except SimpleTypeHintError:
    verbose_instance = '%s, ' % instance if verbose else ''
    raise TypeCheckError(
        'Type-hint for %s violated. Expected an '
        'instance of %s, instead found %san instance of %s.' %
        (hint_type, type_constraint, verbose_instance, type(instance)))
  except CompositeTypeHintError as e:
    raise TypeCheckError('Type-hint for %s violated: %s' % (hint_type, e))
def _interleave_type_check(type_constraint, var_name=None):
  """Lazily type-check the type-hint for a lazily generated sequence type.

  This function can be applied as a decorator or called manually in a curried
  manner:
  * @_interleave_type_check(List[int])
    def gen():
      yield 5

  or

  * gen = _interleave_type_check(Tuple[int, int], 'coord_gen')(gen)

  As a result, all type-checking for the passed generator will occur at 'yield'
  time. This way, we avoid having to depleat the generator in order to
  type-check it.

  Args:
    type_constraint: An instance of a TypeConstraint. The output yielded of
      'gen' will be type-checked according to this type constraint.
    var_name: The variable name binded to 'gen' if type-checking a function
      argument. Used solely for templating in error message generation.

  Returns:
    A function which takes a generator as an argument and returns a wrapped
    version of the generator that interleaves type-checking at 'yield'
    iteration. If the generator received is already wrapped, then it is simply
    returned to avoid nested wrapping.
  """
  def wrapper(gen):
    if isinstance(gen, GeneratorWrapper):
      # Already wrapped: avoid stacking a second layer of checks.
      return gen

    def check_yielded(value):
      _check_instance_type(type_constraint, value, var_name)

    return GeneratorWrapper(gen, check_yielded)

  return wrapper
class GeneratorWrapper(object):
  """A wrapper around a generator, allows execution of a callback per yield.

  Additionally, wrapping a generator with this class allows one to assign
  arbitary attributes to a generator object just as with a function object.

  Attributes:
    internal_gen: A instance of a generator object. As part of 'step' of the
      generator, the yielded object will be passed to 'interleave_func'.
    interleave_func: A callback accepting a single argument. This function will
      be called with the result of each yielded 'step' in the internal
      generator.
  """
  def __init__(self, gen, interleave_func):
    self.internal_gen = gen
    self.interleave_func = interleave_func

  def __getattr__(self, attr):
    # TODO(laolu): May also want to intercept 'send' in the future if we move to
    # a GeneratorHint with 3 type-params:
    # * Generator[send_type, return_type, yield_type]
    # Note: '__next__'/'__iter__' are defined on the class, so ordinary
    # attribute lookup finds them first; these branches are kept for parity.
    if attr == '__next__':
      return self.__next__()
    if attr == '__iter__':
      return self.__iter__()
    # Everything else is delegated to the wrapped generator.
    return getattr(self.internal_gen, attr)

  def __next__(self):
    value = next(self.internal_gen)
    self.interleave_func(value)
    return value

  next = __next__

  def __iter__(self):
    for value in self.internal_gen:
      self.interleave_func(value)
      yield value
| 34.562868 | 109 | 0.699986 |
from __future__ import absolute_import
import inspect
import itertools
import logging
import sys
import traceback
import types
from builtins import next
from builtins import object
from builtins import zip
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing import TypeVar
from apache_beam.typehints import native_type_compatibility
from apache_beam.typehints import typehints
from apache_beam.typehints.native_type_compatibility import convert_to_beam_type
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
from apache_beam.typehints.typehints import check_constraint
from apache_beam.typehints.typehints import validate_composite_type_param
try:
import funcsigs
except ImportError:
funcsigs = None
__all__ = [
'no_annotations',
'with_input_types',
'with_output_types',
'WithTypeHints',
'TypeCheckError',
]
T = TypeVar('T')
WithTypeHintsT = TypeVar('WithTypeHintsT', bound='WithTypeHints')
_MethodDescriptorType = type(str.upper)
_ANY_VAR_POSITIONAL = typehints.Tuple[typehints.Any, ...]
_ANY_VAR_KEYWORD = typehints.Dict[typehints.Any, typehints.Any]
_enable_from_callable = False
try:
_original_getfullargspec = inspect.getfullargspec
_use_full_argspec = True
except AttributeError:
_original_getfullargspec = inspect.getargspec
_use_full_argspec = False
def getfullargspec(func):
  """Returns an argspec for func, tolerating non-function callables.

  Python 2 only (asserted below). Falls back to the class constructor
  (dropping 'self') or an instance's __call__, and finally to a permissive
  catch-all spec for callables that cannot be inspected at all.
  """
  assert sys.version_info < (3, ), 'This method should not be used in Python 3'
  try:
    return _original_getfullargspec(func)
  except TypeError:
    if isinstance(func, type):
      # Classes: inspect the constructor and drop its 'self' argument.
      argspec = getfullargspec(func.__init__)
      del argspec.args[0]
      return argspec
    elif callable(func):
      try:
        return _original_getfullargspec(func.__call__)
      except TypeError:
        # Arguments with the %unknown% prefix will be ignored in the type
        # checking code.
        if _use_full_argspec:
          return inspect.FullArgSpec(['_'],
                                     '__unknown__varargs',
                                     '__unknown__keywords', (), [], {}, {})
        else:  # Python 2
          return inspect.ArgSpec(['_'],
                                 '__unknown__varargs',
                                 '__unknown__keywords', ())
    else:
      raise
def get_signature(func):
  """Returns an inspect (or funcsigs) Signature for func.

  Uses funcsigs when inspect lacks 'signature' (Python 2). If no signature
  can be derived, falls back to a permissive (_, *varargs, **keywords)
  signature. Additionally specializes method descriptors (e.g. str.strip)
  to annotate the first argument with the owning class, and type callables
  to annotate the return value.
  """
  # Fall back on funcsigs if inspect module doesn't have 'signature'; prefer
  # inspect.signature where available.
  if hasattr(inspect, 'signature'):
    inspect_ = inspect
  else:
    inspect_ = funcsigs
  try:
    signature = inspect_.signature(func)
  except ValueError:
    # Fall back on a catch-all signature.
    params = [
        inspect_.Parameter('_', inspect_.Parameter.POSITIONAL_OR_KEYWORD),
        inspect_.Parameter(
            '__unknown__varargs', inspect_.Parameter.VAR_POSITIONAL),
        inspect_.Parameter(
            '__unknown__keywords', inspect_.Parameter.VAR_KEYWORD)
    ]
    signature = inspect_.Signature(params)
  # Specialization to hint the first argument of certain builtins, such as
  # str.strip.
  if isinstance(func, _MethodDescriptorType):
    params = list(signature.parameters.values())
    if params[0].annotation == params[0].empty:
      params[0] = params[0].replace(annotation=func.__objclass__)
    signature = signature.replace(parameters=params)
  # Specialization to hint the return value of type callables.
  if (signature.return_annotation == signature.empty and
      isinstance(func, type)):
    signature = signature.replace(return_annotation=typehints.normalize(func))
  return signature
def no_annotations(fn):
  """Decorator that prevents Beam from using type hint annotations on a
  callable."""
  setattr(fn, '_beam_no_annotations', True)
  return fn
class IOTypeHints(NamedTuple(
    'IOTypeHints',
    [('input_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
     ('output_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
     ('origin', List[str])])):
  """Encapsulates all type hint information about a Dataflow construct.

  This should primarily be used via the WithTypeHints mixin class, though
  may also be attached to other objects (such as Python functions).

  Attributes:
    input_types: (tuple, dict) List of typing types, and an optional
      dictionary. May be None. The list and dict correspond to args and
      kwargs.
    output_types: (tuple, dict) List of typing types, and an optional
      dictionary (unused). Only the first element of the list is used.
      May be None.
    origin: (List[str]) Stack of tracebacks of method calls used to create
      this instance.
  """
  # Maximum number of stack frames recorded per origin entry.
  traceback_limit = 5

  @classmethod
  def _make_origin(cls, bases, tb=True, msg=()):
    # type: (List[IOTypeHints], bool, List[str]) -> List[str]
    """Builds the debug origin list: msg lines, optional stack, then bases."""
    if msg:
      res = msg
    else:
      res = []
    if tb:
      # Omit this method and the IOTypeHints method that called it.
      num_frames_skip = 2
      tb = traceback.format_stack(limit=cls.traceback_limit +
                                  num_frames_skip)[:-num_frames_skip]
      # Each entry is 'File ...\n[code]\n'; split and flatten to single lines.
      res += list(
          itertools.chain.from_iterable(s.strip().split('\n') for s in tb))
    bases = [base for base in bases if base.origin]
    if bases:
      res += ['', 'based on:']
      for i, base in enumerate(bases):
        if i > 0:
          res += ['', 'and:']
        res += [' ' + str(base)]
        res += [' ' + s for s in base.origin]
    return res

  @classmethod
  def empty(cls):
    # type: () -> IOTypeHints
    """Construct a base IOTypeHints object with no hints."""
    return IOTypeHints(None, None, [])

  @classmethod
  def from_callable(cls, fn):
    # type: (Callable) -> Optional[IOTypeHints]
    """Construct an IOTypeHints object from a callable's signature.

    Supports Python 3 annotations. For partial annotations, sets unknown
    types to Any, _ANY_VAR_POSITIONAL, or _ANY_VAR_KEYWORD.

    Returns:
      A new IOTypeHints or None if no annotations found.
    """
    if not _enable_from_callable or getattr(fn, '_beam_no_annotations', False):
      return None
    signature = get_signature(fn)
    if (all(param.annotation == param.empty
            for param in signature.parameters.values()) and
        signature.return_annotation == signature.empty):
      # Nothing annotated at all: no hints to derive.
      return None
    input_args = []
    input_kwargs = {}
    for param in signature.parameters.values():
      if param.annotation == param.empty:
        # Unannotated parameters default to the most permissive hint for
        # their kind.
        if param.kind == param.VAR_POSITIONAL:
          input_args.append(_ANY_VAR_POSITIONAL)
        elif param.kind == param.VAR_KEYWORD:
          input_kwargs[param.name] = _ANY_VAR_KEYWORD
        elif param.kind == param.KEYWORD_ONLY:
          input_kwargs[param.name] = typehints.Any
        else:
          input_args.append(typehints.Any)
      else:
        if param.kind in [param.KEYWORD_ONLY, param.VAR_KEYWORD]:
          input_kwargs[param.name] = convert_to_beam_type(param.annotation)
        else:
          assert param.kind in [param.POSITIONAL_ONLY,
                                param.POSITIONAL_OR_KEYWORD,
                                param.VAR_POSITIONAL], \
              'Unsupported Parameter kind: %s' % param.kind
          input_args.append(convert_to_beam_type(param.annotation))
    output_args = []
    if signature.return_annotation != signature.empty:
      output_args.append(convert_to_beam_type(signature.return_annotation))
    else:
      output_args.append(typehints.Any)
    name = getattr(fn, '__name__', '<unknown>')
    msg = ['from_callable(%s)' % name, ' signature: %s' % signature]
    if hasattr(fn, '__code__'):
      msg.append(
          ' File "%s", line %d' %
          (fn.__code__.co_filename, fn.__code__.co_firstlineno))
    return IOTypeHints(
        input_types=(tuple(input_args), input_kwargs),
        output_types=(tuple(output_args), {}),
        origin=cls._make_origin([], tb=False, msg=msg))

  def with_input_types(self, *args, **kwargs):
    # type: (...) -> IOTypeHints
    """Returns a copy of this instance with the given input type hints."""
    return self._replace(
        input_types=(args, kwargs), origin=self._make_origin([self]))

  def with_output_types(self, *args, **kwargs):
    # type: (...) -> IOTypeHints
    """Returns a copy of this instance with the given output type hints."""
    return self._replace(
        output_types=(args, kwargs), origin=self._make_origin([self]))

  def simple_output_type(self, context):
    """Returns the single positional output hint, or None if none is set.

    Raises:
      TypeError: If there is more than one positional output hint or any
        keyword output hints.
    """
    if self._has_output_types():
      args, kwargs = self.output_types
      if len(args) != 1 or kwargs:
        raise TypeError(
            'Expected single output type hint for %s but got: %s' %
            (context, self.output_types))
      return args[0]

  def has_simple_output_type(self):
    """Whether there's a single positional output type."""
    return (
        self.output_types and len(self.output_types[0]) == 1 and
        not self.output_types[1])

  def strip_iterable(self):
    # type: () -> IOTypeHints
    """Removes the outer Iterable (or equivalent) from the output type.

    Only affects instances with simple output types, otherwise is a no-op.
    Optional[T] output types are unwrapped to T first; None/NoneType output
    types are left untouched. Does not modify self.

    Returns:
      A copy of this instance with a possibly different output type.

    Raises:
      ValueError if output type is simple and not iterable.
    """
    if self.output_types is None or not self.has_simple_output_type():
      return self
    output_type = self.output_types[0][0]
    if output_type is None or isinstance(output_type, type(None)):
      return self
    # Unwrap Optional[T] (a two-way Union with NoneType) to T.
    if isinstance(output_type, typehints.UnionConstraint):
      types = list(output_type.union_types)
      if len(types) == 2:
        try:
          types.remove(type(None))
          output_type = types[0]
        except ValueError:
          pass
    yielded_type = typehints.get_yielded_type(output_type)
    return self._replace(
        output_types=((yielded_type, ), {}),
        origin=self._make_origin([self], tb=False, msg=['strip_iterable()']))

  def with_defaults(self, hints):
    # type: (Optional[IOTypeHints]) -> IOTypeHints
    """Returns a copy whose unset input/output types are taken from hints."""
    if not hints:
      return self
    if not self:
      return hints
    if self._has_input_types():
      input_types = self.input_types
    else:
      input_types = hints.input_types
    if self._has_output_types():
      output_types = self.output_types
    else:
      output_types = hints.output_types
    res = IOTypeHints(
        input_types,
        output_types,
        self._make_origin([self, hints], tb=False, msg=['with_defaults()']))
    if res == self:
      # Avoid needlessly growing the origin traceback.
      return self
    else:
      return res

  def _has_input_types(self):
    # Non-None and at least one of the (args, kwargs) pair is non-empty.
    return self.input_types is not None and any(self.input_types)

  def _has_output_types(self):
    return self.output_types is not None and any(self.output_types)

  def __bool__(self):
    # Truthy iff any input or output hints are set.
    return self._has_input_types() or self._has_output_types()

  def __repr__(self):
    return 'IOTypeHints[inputs=%s, outputs=%s]' % (
        self.input_types, self.output_types)

  def debug_str(self):
    """Returns repr plus the origin trace, one entry per line."""
    return '\n'.join([self.__repr__()] + self.origin)

  def __eq__(self, other):
    # Unset (None) hints and present-but-empty hints compare equal; origin
    # is deliberately excluded from equality.
    def same(a, b):
      if a is None or not any(a):
        return b is None or not any(b)
      else:
        return a == b

    return (
        same(self.input_types, other.input_types) and
        same(self.output_types, other.output_types))

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(str(self))

  def __reduce__(self):
    # Don't include "origin" debug information in pickled form.
    return (IOTypeHints, (self.input_types, self.output_types, []))
class WithTypeHints(object):
  """A mixin class that provides the ability to set and retrieve type hints.
  """
  def __init__(self, *unused_args, **unused_kwargs):
    self._type_hints = IOTypeHints.empty()

  def _get_or_create_type_hints(self):
    # type: () -> IOTypeHints
    """Returns this instance's hints, creating an empty set if needed."""
    # __init__ may not have been called.
    try:
      # Only return an instance-bound value (see BEAM-8629); do not fall
      # back to class-level hints via normal attribute lookup.
      return self.__dict__['_type_hints']
    except KeyError:
      self._type_hints = IOTypeHints.empty()
      return self._type_hints

  def get_type_hints(self):
    """Gets and/or initializes type hints for this object.

    Unset input/output types fall back first to self.default_type_hints()
    and then to hints declared on self.__class__.
    """
    return (
        self._get_or_create_type_hints().with_defaults(
            self.default_type_hints()).with_defaults(
                get_type_hints(self.__class__)))

  def default_type_hints(self):
    # Subclasses may override to supply hints when none were set explicitly.
    return None

  def with_input_types(self, *arg_hints, **kwarg_hints):
    # type: (WithTypeHintsT, *Any, **Any) -> WithTypeHintsT
    """Records the given input type hints (converted to Beam types) and
    returns self for chaining."""
    arg_hints = native_type_compatibility.convert_to_beam_types(arg_hints)
    kwarg_hints = native_type_compatibility.convert_to_beam_types(kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_input_types(
        *arg_hints, **kwarg_hints)
    return self

  def with_output_types(self, *arg_hints, **kwarg_hints):
    # type: (WithTypeHintsT, *Any, **Any) -> WithTypeHintsT
    """Records the given output type hints (converted to Beam types) and
    returns self for chaining."""
    arg_hints = native_type_compatibility.convert_to_beam_types(arg_hints)
    kwarg_hints = native_type_compatibility.convert_to_beam_types(kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_output_types(
        *arg_hints, **kwarg_hints)
    return self
class TypeCheckError(Exception):
  """Raised when declared type hints cannot be bound or are violated."""
def _positional_arg_hints(arg, hints):
  """Return the hint for a (possibly tuple-grouped) positional argument.

  A list *arg* (legacy tuple parameter) becomes a Tuple[...] of the element
  hints, recursively; names without a hint default to Any.
  """
  if not isinstance(arg, list):
    return hints.get(arg, typehints.Any)
  return typehints.Tuple[[_positional_arg_hints(a, hints) for a in arg]]
def _unpack_positional_arg_hints(arg, hint):
  """Expand *hint* for a (possibly nested) tuple-unpacking argument.

  If *arg* is a list of sub-argument names, *hint* must be consistent with a
  tuple of the same arity; per-element hints are produced, recursing into
  nested unpackings.  Plain arguments return *hint* unchanged.
  """
  if not isinstance(arg, list):
    return hint
  tuple_constraint = typehints.Tuple[[typehints.Any] * len(arg)]
  if not typehints.is_consistent_with(hint, tuple_constraint):
    raise TypeCheckError(
        'Bad tuple arguments for %s: expected %s, got %s' %
        (arg, tuple_constraint, hint))
  if isinstance(hint, typehints.TupleConstraint):
    return tuple(
        _unpack_positional_arg_hints(sub_arg, sub_hint)
        for sub_arg, sub_hint in zip(arg, hint.tuple_types))
  # Consistent but not an explicit tuple constraint: elements are unknown.
  return (typehints.Any, ) * len(arg)
def getcallargs_forhints(func, *typeargs, **typekwargs):
  """Bind type hints to func's parameters, like inspect.getcallargs()."""
  if sys.version_info >= (3, ):
    return getcallargs_forhints_impl_py3(func, typeargs, typekwargs)
  return getcallargs_forhints_impl_py2(func, typeargs, typekwargs)
def getcallargs_forhints_impl_py2(func, typeargs, typekwargs):
  """Bind type hints to func's parameters (Python 2 implementation).

  Returns a dict mapping argument name to hint, with legacy tuple-unpacking
  parameters expanded, defaulted parameters mapped to Any, and *args/**kwargs
  given catch-all hints when not explicitly provided.
  """
  argspec = getfullargspec(func)
  # Expand hints for legacy tuple-unpacking parameters, e.g. def f((a, b)).
  packed_typeargs = [
      _unpack_positional_arg_hints(arg, hint)
      for (arg, hint) in zip(argspec.args, typeargs)
  ]
  packed_typeargs += list(typeargs[len(packed_typeargs):])
  # Temporarily point inspect.getargspec at this module's getfullargspec so
  # getcallargs() sees a consistent spec; restored in the finally clause.
  inspect.getargspec = getfullargspec
  try:
    callargs = inspect.getcallargs(func, *packed_typeargs, **typekwargs)
  except TypeError as e:
    raise TypeCheckError(e)
  finally:
    inspect.getargspec = _original_getfullargspec
  if argspec.defaults:
    # Parameters still bound to their default object get an Any hint.
    for k, var in enumerate(reversed(argspec.args)):
      if k >= len(argspec.defaults):
        break
      if callargs.get(var, None) is argspec.defaults[-k - 1]:
        callargs[var] = typehints.Any
  # Fill in catch-all hints for *args and **kwargs when not supplied.
  if argspec.varargs:
    callargs[argspec.varargs] = typekwargs.get(
        argspec.varargs, _ANY_VAR_POSITIONAL)
  varkw = argspec.keywords
  if varkw:
    callargs[varkw] = typekwargs.get(varkw, _ANY_VAR_KEYWORD)
  return callargs
def _normalize_var_positional_hint(hint):
  """Convert a *args hint (a tuple of hints) into a Tuple[...] constraint."""
  if not hint or type(hint) != tuple:
    raise TypeCheckError('Unexpected VAR_POSITIONAL value: %s' % hint)
  if len(hint) == 1 and isinstance(hint[0], typehints.TupleSequenceConstraint):
    # Already a Tuple[T, ...]-style constraint: pass it through unchanged.
    return hint[0]
  return typehints.Tuple[typehints.Union[hint], ...]
def _normalize_var_keyword_hint(hint, arg_name):
  """Convert a **kwargs hint (a dict of hints) into a Dict[...] constraint."""
  if not hint or type(hint) != dict:
    raise TypeCheckError('Unexpected VAR_KEYWORD value: %s' % hint)
  if len(hint) == 1:
    (key, value), = hint.items()
    if key == arg_name and isinstance(value, typehints.DictConstraint):
      # A single Dict[K, V] hint supplied for the **kwargs parameter itself.
      return value
  return typehints.Dict[str, typehints.Union[list(hint.values())]]
def getcallargs_forhints_impl_py3(func, type_args, type_kwargs):
  """Bind type hints to func's signature (Python 3 implementation).

  Supplied hints win; otherwise parameter annotations are used, then
  catch-alls for *args/**kwargs, then Any for defaulted parameters.
  Raises TypeCheckError if the hints cannot be bound to the signature.
  """
  try:
    signature = get_signature(func)
  except ValueError as e:
    # get_signature can fail for some callables; fall back to "no hints".
    logging.warning('Could not get signature for function: %s: %s', func, e)
    return {}
  try:
    bindings = signature.bind(*type_args, **type_kwargs)
  except TypeError as e:
    # Too few/too many hints for this signature.
    raise TypeCheckError(e)
  bound_args = bindings.arguments
  for param in signature.parameters.values():
    if param.name in bound_args:
      # Bound by a supplied hint: normalize the VAR_* container forms.
      if param.kind == param.VAR_POSITIONAL:
        bound_args[param.name] = _normalize_var_positional_hint(
            bound_args[param.name])
      elif param.kind == param.VAR_KEYWORD:
        bound_args[param.name] = _normalize_var_keyword_hint(
            bound_args[param.name], param.name)
    else:
      # Unbound: fall back to annotation, catch-all, or Any for defaults.
      if param.annotation != param.empty:
        bound_args[param.name] = param.annotation
      elif param.kind == param.VAR_POSITIONAL:
        bound_args[param.name] = _ANY_VAR_POSITIONAL
      elif param.kind == param.VAR_KEYWORD:
        bound_args[param.name] = _ANY_VAR_KEYWORD
      elif param.default is not param.empty:
        bound_args[param.name] = typehints.Any
      else:
        raise ValueError('Unexpected unbound parameter: %s' % param.name)
  return dict(bound_args)
def get_type_hints(fn):
  """Return the IOTypeHints attached to *fn*, creating them if needed.

  An empty hints object is cached on *fn* when attribute assignment is
  possible; otherwise a fresh, uncached IOTypeHints is returned.
  """
  if not hasattr(fn, '_type_hints'):
    try:
      fn._type_hints = IOTypeHints.empty()
    except (AttributeError, TypeError):
      # Object doesn't accept new attributes,
      # but might have some restrictions anyways...
      hints = IOTypeHints.empty()
      # Python 3.7 introduces annotations for _MethodDescriptorTypes.
      if isinstance(fn, _MethodDescriptorType) and sys.version_info < (3, 7):
        hints = hints.with_input_types(fn.__objclass__)  # type: ignore
      return hints
  return fn._type_hints
# pylint: enable=protected-access
def with_input_types(*positional_hints, **keyword_hints):
  # type: (*Any, **Any) -> Callable[[T], T]
  """Decorator attaching (Beam-converted) input type hints to a function.

  For plain Python functions every hint is validated as a composite type
  parameter before being recorded on ``f._type_hints``.
  """
  converted_positional_hints = (
      native_type_compatibility.convert_to_beam_types(positional_hints))
  converted_keyword_hints = (
      native_type_compatibility.convert_to_beam_types(keyword_hints))
  # Drop the unconverted hints so they can't be used by mistake below.
  del positional_hints
  del keyword_hints

  def annotate_input_types(f):
    if isinstance(f, types.FunctionType):
      for t in (list(converted_positional_hints) +
                list(converted_keyword_hints.values())):
        validate_composite_type_param(
            t, error_msg_prefix='All type hint arguments')

    th = getattr(f, '_type_hints', IOTypeHints.empty()).with_input_types(
        *converted_positional_hints, **converted_keyword_hints)
    f._type_hints = th  # pylint: disable=protected-access
    return f

  return annotate_input_types
def with_output_types(*return_type_hint, **kwargs):
  # type: (*Any, **Any) -> Callable[[T], T]
  """Decorator attaching a single (Beam-converted) output type hint.

  Exactly one positional hint is accepted; keyword arguments are rejected.
  """
  if kwargs:
    raise ValueError(
        "All arguments for the 'returns' decorator must be "
        "positional arguments.")

  if len(return_type_hint) != 1:
    raise ValueError(
        "'returns' accepts only a single positional argument. In "
        "order to specify multiple return types, use the 'Tuple' "
        "type-hint.")

  return_type_hint = native_type_compatibility.convert_to_beam_type(
      return_type_hint[0])

  validate_composite_type_param(
      return_type_hint, error_msg_prefix='All type hint arguments')

  def annotate_output_types(f):
    th = getattr(f, '_type_hints', IOTypeHints.empty())
    f._type_hints = th.with_output_types(return_type_hint)  # pylint: disable=protected-access
    return f

  return annotate_output_types
def _check_instance_type(
    type_constraint, instance, var_name=None, verbose=False):
  """Check *instance* against *type_constraint*, raising TypeCheckError.

  SimpleTypeHintError / CompositeTypeHintError from the underlying
  constraint check are re-raised as TypeCheckError with a readable message.
  """
  if var_name is None:
    hint_type = 'return type'
  else:
    hint_type = "argument: '%s'" % var_name
  try:
    check_constraint(type_constraint, instance)
  except SimpleTypeHintError:
    verbose_instance = '%s, ' % instance if verbose else ''
    raise TypeCheckError(
        'Type-hint for %s violated. Expected an '
        'instance of %s, instead found %san instance of %s.' %
        (hint_type, type_constraint, verbose_instance, type(instance)))
  except CompositeTypeHintError as e:
    raise TypeCheckError('Type-hint for %s violated: %s' % (hint_type, e))
def _interleave_type_check(type_constraint, var_name=None):
  """Return a decorator wrapping a generator to type-check yielded values."""
  def wrapper(gen):
    if isinstance(gen, GeneratorWrapper):
      # Already wrapped: don't double-check each yielded value.
      return gen

    def check(value):
      _check_instance_type(type_constraint, value, var_name)

    return GeneratorWrapper(gen, check)

  return wrapper
class GeneratorWrapper(object):
  """Wraps a generator, calling ``interleave_func`` on each yielded value
  before forwarding it to the consumer."""
  def __init__(self, gen, interleave_func):
    self.internal_gen = gen
    self.interleave_func = interleave_func
  def __getattr__(self, attr):
    # TODO(laolu): May also want to intercept 'send' in the future if we move to
    # a GeneratorHint with 3 type-params:
    # * Generator[send_type, return_type, yield_type]
    # NOTE(review): the two branches below appear unreachable -- __next__ and
    # __iter__ are defined on the class, so __getattr__ is never consulted for
    # them; confirm before relying on this path.
    if attr == '__next__':
      return self.__next__()
    elif attr == '__iter__':
      return self.__iter__()
    # Everything else is delegated to the wrapped generator.
    return getattr(self.internal_gen, attr)
  def __next__(self):
    next_val = next(self.internal_gen)
    self.interleave_func(next_val)
    return next_val
  next = __next__
  def __iter__(self):
    for x in self.internal_gen:
      self.interleave_func(x)
      yield x
| true | true |
f726e812891f79ad91797716634694dc86a45c44 | 4,022 | py | Python | h/services/group.py | julien-cheng/h | 36c8ec044725720cf36f0986cdf025395aca8929 | [
"BSD-2-Clause"
] | 2 | 2019-08-04T07:22:11.000Z | 2020-07-17T05:01:41.000Z | h/services/group.py | 11-eleven-11/h | 91c7a4504ad7471ed3e30246763a03e6c1cc531b | [
"BSD-2-Clause"
] | null | null | null | h/services/group.py | 11-eleven-11/h | 91c7a4504ad7471ed3e30246763a03e6c1cc531b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlalchemy as sa
from h.models import Group, User
from h.models.group import ReadableBy
from h.util import group as group_util
class GroupService(object):
    """Service object for fetching and querying ``Group`` records."""

    def __init__(self, session, user_fetcher):
        """
        Create a new groups service.

        :param session: the SQLAlchemy session object
        :param user_fetcher: a callable for fetching users by userid
        """
        self.session = session
        self.user_fetcher = user_fetcher

    def fetch(self, pubid_or_groupid):
        """
        Fetch a group using either a groupid or a pubid.

        :arg pubid_or_groupid: a string in either :mod:`~h.pubid` format
            or as :attr:`h.models.Group.groupid`
        :rtype: :class:`~h.models.Group` or ``None``
        """
        if not group_util.is_groupid(pubid_or_groupid):
            return self.fetch_by_pubid(pubid_or_groupid)
        return self.fetch_by_groupid(pubid_or_groupid)

    def fetch_by_pubid(self, pubid):
        """Return a group with the given ``pubid`` or ``None``."""
        query = self.session.query(Group).filter_by(pubid=pubid)
        return query.one_or_none()

    def fetch_by_groupid(self, groupid):
        """
        Return a group with the given ``groupid`` or ``None``.

        :arg groupid: String in groupid format, e.g. ``group:foo@bar.com``.
            See :class:`~h.models.Group`
        :raises ValueError: if ``groupid`` is not a valid groupid.
            See :func:`h.util.group.split_groupid`
        :rtype: :class:`~h.models.Group` or ``None``
        """
        parts = group_util.split_groupid(groupid)
        query = self.session.query(Group).filter_by(
            authority=parts["authority"],
            authority_provided_id=parts["authority_provided_id"],
        )
        return query.one_or_none()

    def filter_by_name(self, name=None):
        """
        Return a Query of all Groups, optionally filtered by name.

        If ``name`` is present, groups will be filtered by name. Filtering
        is case-insensitive and wildcarded. Otherwise, all groups will be
        retrieved.

        :rtype: sqlalchemy.orm.query.Query
        """
        query = self.session.query(Group)
        if name:
            pattern = "%{}%".format(name.lower())
            query = query.filter(sa.func.lower(Group.name).like(pattern))
        return query.order_by(Group.created.desc())

    def groupids_readable_by(self, user):
        """
        Return a list of pubids for which the user has read access.

        If the passed-in user is ``None``, this returns the list of
        world-readable groups.

        :type user: `h.models.user.User`
        """
        readable = Group.readable_by == ReadableBy.world
        if user is not None:
            readable = sa.or_(
                readable,
                sa.and_(
                    Group.readable_by == ReadableBy.members,
                    Group.members.any(User.id == user.id),
                ),
            )
        rows = self.session.query(Group.pubid).filter(readable)
        return [row.pubid for row in rows]

    def groupids_created_by(self, user):
        """
        Return a list of pubids which the user created.

        If the passed-in user is ``None``, this returns an empty list.

        :type user: `h.models.user.User` or None
        """
        if user is None:
            return []
        rows = self.session.query(Group.pubid).filter_by(creator=user)
        return [row.pubid for row in rows]
def groups_factory(context, request):
    """Return a GroupService instance for the passed context and request."""
    fetch_user = request.find_service(name="user").fetch
    return GroupService(session=request.db, user_fetcher=fetch_user)
| 31.920635 | 87 | 0.60915 |
from __future__ import unicode_literals
import sqlalchemy as sa
from h.models import Group, User
from h.models.group import ReadableBy
from h.util import group as group_util
class GroupService(object):
    """Service object for fetching and querying ``Group`` records."""
    def __init__(self, session, user_fetcher):
        """
        Create a new groups service.

        :param session: the SQLAlchemy session object
        :param user_fetcher: a callable for fetching users by userid
        """
        self.session = session
        self.user_fetcher = user_fetcher
    def fetch(self, pubid_or_groupid):
        """Fetch a group by groupid (``group:...@authority``) or by pubid."""
        if group_util.is_groupid(pubid_or_groupid):
            return self.fetch_by_groupid(pubid_or_groupid)
        return self.fetch_by_pubid(pubid_or_groupid)
    def fetch_by_pubid(self, pubid):
        """Return the group with the given ``pubid`` or ``None``."""
        return self.session.query(Group).filter_by(pubid=pubid).one_or_none()
    def fetch_by_groupid(self, groupid):
        """
        Return the group with the given ``groupid`` or ``None``.

        :raises ValueError: if ``groupid`` is not a valid groupid
            (propagated from :func:`h.util.group.split_groupid`).
        """
        parts = group_util.split_groupid(groupid)
        authority = parts["authority"]
        authority_provided_id = parts["authority_provided_id"]
        return (
            self.session.query(Group)
            .filter_by(authority=authority)
            .filter_by(authority_provided_id=authority_provided_id)
            .one_or_none()
        )
    def filter_by_name(self, name=None):
        """
        Return a Query of all Groups, optionally filtered by name.

        Matching is case-insensitive and wildcarded when ``name`` is given.
        """
        filter_terms = []
        if name:
            filter_terms.append(
                sa.func.lower(Group.name).like("%{}%".format(name.lower()))
            )
        return (
            self.session.query(Group)
            .filter(*filter_terms)
            .order_by(Group.created.desc())
        )
    def groupids_readable_by(self, user):
        """
        Return a list of pubids for which the user has read access.

        When ``user`` is ``None`` only world-readable groups are returned.
        """
        readable = Group.readable_by == ReadableBy.world
        if user is not None:
            # Members-only groups are readable when the user is a member.
            readable_member = sa.and_(
                Group.readable_by == ReadableBy.members,
                Group.members.any(User.id == user.id),
            )
            readable = sa.or_(readable, readable_member)
        return [
            record.pubid for record in self.session.query(Group.pubid).filter(readable)
        ]
    def groupids_created_by(self, user):
        """
        Return a list of pubids of groups created by ``user``.

        Returns an empty list when ``user`` is ``None``.
        """
        if user is None:
            return []
        return [
            g.pubid for g in self.session.query(Group.pubid).filter_by(creator=user)
        ]
def groups_factory(context, request):
    """Return a GroupService instance for the passed context and request."""
    user_service = request.find_service(name="user")
    return GroupService(session=request.db, user_fetcher=user_service.fetch)
| true | true |
f726e91e889b74acf6f116c6d95887b343147e4d | 73,451 | py | Python | tools/sourcecode/Python-3.10.0/Lib/asyncio/base_events.py | gagominecraft12/Blueity-Client-Retrace | d42a927a85226d73da66123922d9ea11cc20ac3d | [
"MIT"
] | 33 | 2021-07-25T14:23:35.000Z | 2022-03-31T00:17:30.000Z | tools/sourcecode/Python-3.10.0/Lib/asyncio/base_events.py | gagominecraft12/Blueity-Client-Retrace | d42a927a85226d73da66123922d9ea11cc20ac3d | [
"MIT"
] | 32 | 2019-04-26T12:29:36.000Z | 2022-03-08T14:24:30.000Z | Lib/asyncio/base_events.py | val-verde/cpython | 17aa701d799d5e071d83205d877f722f1498a09f | [
"0BSD"
] | 3 | 2019-11-12T15:21:58.000Z | 2020-09-04T14:27:55.000Z | """Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
# True when the platform's socket module exposes IPv6 support.
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if type == socket.SOCK_STREAM:
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, flowinfo, scopeid)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
    """Protocol that temporarily takes over a transport during a sendfile
    fallback: pauses reading, remembers the previous protocol/flow-control
    state, and restores everything in restore()."""
    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        self._proto = transp.get_protocol()
        # Remember state so restore() can put the transport back exactly.
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None
    async def drain(self):
        """Wait until the transport is writable again (or raise if closing)."""
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut
    def connection_made(self, transport):
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")
    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        self._proto.connection_lost(exc)
    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()
    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None
    def data_received(self, data):
        raise RuntimeError("Invalid state: reading should be paused")
    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")
    async def restore(self):
        """Reinstall the original protocol and resume paused reading/writing."""
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
class Server(events.AbstractServer):
    """A listening server wrapping one or more sockets.

    Active client transports are tracked via _attach()/_detach() so that
    wait_closed() can wait for the last connection after close().
    """
    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout):
        self._loop = loop
        self._sockets = sockets
        # Number of currently attached (active) client transports.
        self._active_count = 0
        # Futures resolved by _wakeup() when the server is fully closed.
        self._waiters = []
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._serving = False
        self._serving_forever_fut = None
    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
    def _attach(self):
        # Called when a new client transport is created.
        assert self._sockets is not None
        self._active_count += 1
    def _detach(self):
        # Called when a client transport closes; wakes waiters once the
        # server is closed and the last connection goes away.
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self._sockets is None:
            self._wakeup()
    def _wakeup(self):
        # Resolve all pending wait_closed() futures.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)
    def _start_serving(self):
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout)
    def get_loop(self):
        return self._loop
    def is_serving(self):
        return self._serving
    @property
    def sockets(self):
        # Expose sockets wrapped in TransportSocket; empty tuple once closed.
        if self._sockets is None:
            return ()
        return tuple(trsock.TransportSocket(s) for s in self._sockets)
    def close(self):
        """Stop serving: close listeners and cancel serve_forever()."""
        sockets = self._sockets
        if sockets is None:
            return
        self._sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        self._serving = False
        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None
        if self._active_count == 0:
            self._wakeup()
    async def start_serving(self):
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # go through.
        await tasks.sleep(0)
    async def serve_forever(self):
        """Serve until cancelled; the server is closed on cancellation."""
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')
        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()
        try:
            await self._serving_forever_fut
        except exceptions.CancelledError:
            try:
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None
    async def wait_closed(self):
        """Wait until the server is fully closed (no active connections)."""
        if self._sockets is None or self._waiters is None:
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
class BaseEventLoop(events.AbstractEventLoop):
    def __init__(self):
        """Initialize loop bookkeeping; the loop is not running yet."""
        self._timer_cancelled_count = 0
        self._closed = False
        self._stopping = False
        # FIFO of ready callbacks and heap of scheduled TimerHandles.
        self._ready = collections.deque()
        self._scheduled = []
        self._default_executor = None
        self._internal_fds = 0
        # Identifier of the thread running the event loop, or None if the
        # event loop is not running
        self._thread_id = None
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        self.set_debug(coroutines._is_debug_mode())
        # In debug mode, if the execution of a callback or a step of a task
        # exceed this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
        self._current_handle = None
        self._task_factory = None
        self._coroutine_origin_tracking_enabled = False
        self._coroutine_origin_tracking_saved_depth = None
        # A weak set of all asynchronous generators that are
        # being iterated by the loop.
        self._asyncgens = weakref.WeakSet()
        # Set to True when `loop.shutdown_asyncgens` is called.
        self._asyncgens_shutdown_called = False
        # Set to True when `loop.shutdown_default_executor` is called.
        self._executor_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
    def create_future(self):
        """Create a Future object attached to the loop."""
        # The future's callbacks will be scheduled on this loop.
        return futures.Future(loop=self)
    def create_task(self, coro, *, name=None):
        """Schedule a coroutine object.

        Return a task object.
        """
        self._check_closed()
        if self._task_factory is None:
            task = tasks.Task(coro, loop=self, name=name)
            if task._source_traceback:
                # Drop the innermost frame (this call) from the captured
                # traceback shown in debug mode.
                del task._source_traceback[-1]
        else:
            task = self._task_factory(self, coro)
            tasks._set_task_name(task, name)
        return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
    def get_task_factory(self):
        """Return a task factory, or None if the default one is in use."""
        # Set via set_task_factory(); consumed by create_task().
        return self._task_factory
    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create socket transport.

        Implemented by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=None,
            call_connection_made=True):
        """Create SSL transport.

        Implemented by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        """Create datagram transport.

        Implemented by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create read pipe transport.

        Implemented by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create write pipe transport.

        Implemented by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
    async def _make_subprocess_transport(self, protocol, args, shell,
                                         stdin, stdout, stderr, bufsize,
                                         extra=None, **kwargs):
        """Create subprocess transport.

        Implemented by platform-specific event-loop subclasses.
        """
        raise NotImplementedError
    def _write_to_self(self):
        """Write a byte to self-pipe, to wake up the event loop.

        This may be called from a different thread.

        The subclass is responsible for implementing the self-pipe.
        """
        raise NotImplementedError
    def _process_events(self, event_list):
        """Process selector events.

        Implemented by selector/proactor event-loop subclasses.
        """
        raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _check_default_executor(self):
if self._executor_shutdown_called:
raise RuntimeError('Executor shutdown has been called')
    def _asyncgen_finalizer_hook(self, agen):
        # sys.set_asyncgen_hooks() finalizer (installed in run_forever):
        # close a garbage-collected async generator on this loop,
        # thread-safely.
        self._asyncgens.discard(agen)
        if not self.is_closed():
            self.call_soon_threadsafe(self.create_task, agen.aclose())
    def _asyncgen_firstiter_hook(self, agen):
        # sys.set_asyncgen_hooks() firstiter hook (installed in run_forever):
        # track live async generators so shutdown_asyncgens() can close them;
        # warn when iteration starts after shutdown was already requested.
        if self._asyncgens_shutdown_called:
            warnings.warn(
                f"asynchronous generator {agen!r} was scheduled after "
                f"loop.shutdown_asyncgens() call",
                ResourceWarning, source=self)
        self._asyncgens.add(agen)
    async def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        self._asyncgens_shutdown_called = True
        if not len(self._asyncgens):
            # If Python version is <3.6 or we don't have any asynchronous
            # generators alive.
            return
        closing_agens = list(self._asyncgens)
        self._asyncgens.clear()
        # Close all tracked generators concurrently; failures are reported
        # through the exception handler rather than raised.
        results = await tasks.gather(
            *[ag.aclose() for ag in closing_agens],
            return_exceptions=True)
        for result, agen in zip(results, closing_agens):
            if isinstance(result, Exception):
                self.call_exception_handler({
                    'message': f'an error occurred during closing of '
                               f'asynchronous generator {agen!r}',
                    'exception': result,
                    'asyncgen': agen
                })
    async def shutdown_default_executor(self):
        """Schedule the shutdown of the default executor."""
        self._executor_shutdown_called = True
        if self._default_executor is None:
            return
        # Executor.shutdown(wait=True) blocks, so run it in a helper thread
        # and await a future resolved from _do_shutdown().
        future = self.create_future()
        thread = threading.Thread(target=self._do_shutdown, args=(future,))
        thread.start()
        try:
            await future
        finally:
            thread.join()
    def _do_shutdown(self, future):
        # Runs in a helper thread (see shutdown_default_executor): shut down
        # the executor, then report the outcome back to the loop thread.
        try:
            self._default_executor.shutdown(wait=True)
            self.call_soon_threadsafe(future.set_result, None)
        except Exception as ex:
            self.call_soon_threadsafe(future.set_exception, ex)
def _check_running(self):
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
    def run_forever(self):
        """Run until stop() is called."""
        self._check_closed()
        self._check_running()
        self._set_coroutine_origin_tracking(self._debug)
        self._thread_id = threading.get_ident()
        # Install this loop's async-generator hooks, remembering the old ones
        # so they can be restored when the loop stops.
        old_agen_hooks = sys.get_asyncgen_hooks()
        sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
                               finalizer=self._asyncgen_finalizer_hook)
        try:
            events._set_running_loop(self)
            while True:
                self._run_once()
                if self._stopping:
                    break
        finally:
            # Restore global state even if a callback raised.
            self._stopping = False
            self._thread_id = None
            events._set_running_loop(None)
            self._set_coroutine_origin_tracking(False)
            sys.set_asyncgen_hooks(*old_agen_hooks)
    def run_until_complete(self, future):
        """Run until the Future is done.

        If the argument is a coroutine, it is wrapped in a Task.

        WARNING: It would be disastrous to call run_until_complete()
        with the same coroutine twice -- it would wrap it in two
        different Tasks and that can't be good.

        Return the Future's result, or raise its exception.
        """
        self._check_closed()
        self._check_running()
        new_task = not futures.isfuture(future)
        future = tasks.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised if the future didn't complete, so there
            # is no need to log the "destroy pending task" message
            future._log_destroy_pending = False
        future.add_done_callback(_run_until_complete_cb)
        try:
            self.run_forever()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise
        finally:
            future.remove_done_callback(_run_until_complete_cb)
        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')
        return future.result()
    def stop(self):
        """Stop running the event loop.

        Every callback already scheduled will still run.  This simply informs
        run_forever to stop looping after a complete iteration.
        """
        # Checked by run_forever() after each _run_once() pass.
        self._stopping = True
    def close(self):
        """Close the event loop.

        This clears the queues and shuts down the executor,
        but does not wait for the executor to finish.

        The event loop must not be running.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            return
        if self._debug:
            logger.debug("Close %r", self)
        self._closed = True
        self._ready.clear()
        self._scheduled.clear()
        self._executor_shutdown_called = True
        executor = self._default_executor
        if executor is not None:
            self._default_executor = None
            # Fire-and-forget: do not block the closing thread on workers.
            executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
    def __del__(self, _warn=warnings.warn):
        # _warn is bound at definition time so the warning still works
        # during interpreter shutdown, when module globals may already be
        # cleared.
        if not self.is_closed():
            _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
            if not self.is_running():
                # Best-effort cleanup; a running loop cannot be closed here.
                self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
    def call_later(self, delay, callback, *args, context=None):
        """Arrange for a callback to be called at a given time.

        Return a Handle: an opaque object with a cancel() method that
        can be used to cancel the call.

        The delay can be an int or float, expressed in seconds. It is
        always relative to the current time.

        Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
        will be called first.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        timer = self.call_at(self.time() + delay, callback, *args,
                             context=context)
        if timer._source_traceback:
            # Drop this wrapper's frame so the creation traceback points at
            # the caller of call_later(), not at call_later() itself.
            del timer._source_traceback[-1]
        return timer
    def call_at(self, when, callback, *args, context=None):
        """Like call_later(), but uses an absolute time.

        Absolute time corresponds to the event loop's time() method.
        Return a TimerHandle which can be used to cancel the call.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_at')
        timer = events.TimerHandle(when, callback, args, self, context)
        if timer._source_traceback:
            # Hide this wrapper's frame from the creation traceback.
            del timer._source_traceback[-1]
        # _scheduled is a heap ordered by the timers' deadlines.
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer
    def call_soon(self, callback, *args, context=None):
        """Arrange for a callback to be called as soon as possible.

        This operates as a FIFO queue: callbacks are called in the
        order in which they are registered.  Each callback will be
        called exactly once.

        Any positional arguments after the callback will be passed to
        the callback when it is called.  Return a Handle which can be
        used to cancel the call.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_soon')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            # Hide this wrapper's frame from the creation traceback.
            del handle._source_traceback[-1]
        return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
    def _call_soon(self, callback, args, context):
        # Shared implementation of call_soon() and call_soon_threadsafe():
        # wrap the callback in a Handle and append it to the ready queue.
        handle = events.Handle(callback, args, self, context)
        if handle._source_traceback:
            # Hide this helper's frame from the creation traceback.
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
    def call_soon_threadsafe(self, callback, *args, context=None):
        """Like call_soon(), but thread-safe."""
        self._check_closed()
        if self._debug:
            self._check_callback(callback, 'call_soon_threadsafe')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            # Hide this wrapper's frame from the creation traceback.
            del handle._source_traceback[-1]
        # Wake up the loop, which may be blocked in select() on another
        # thread, so it notices the new callback promptly.
        self._write_to_self()
        return handle
    def run_in_executor(self, executor, func, *args):
        """Run func(*args) in *executor* and return an asyncio Future.

        When *executor* is None, the loop's default ThreadPoolExecutor is
        used, creating it lazily on first use.
        """
        self._check_closed()
        if self._debug:
            self._check_callback(func, 'run_in_executor')
        if executor is None:
            executor = self._default_executor
            # Only check when the default executor is being used
            self._check_default_executor()
            if executor is None:
                # Lazily create the default thread pool on first use.
                executor = concurrent.futures.ThreadPoolExecutor(
                    thread_name_prefix='asyncio'
                )
                self._default_executor = executor
        return futures.wrap_future(
            executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
warnings.warn(
'Using the default executor that is not an instance of '
'ThreadPoolExecutor is deprecated and will be prohibited '
'in Python 3.9',
DeprecationWarning, 2)
self._default_executor = executor
    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        # Debug-mode wrapper for socket.getaddrinfo() that logs the query
        # and how long the (potentially blocking) resolution took.
        msg = [f"{host}:{port!r}"]
        if family:
            msg.append(f'family={family!r}')
        if type:
            msg.append(f'type={type!r}')
        if proto:
            msg.append(f'proto={proto!r}')
        if flags:
            msg.append(f'flags={flags!r}')
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)
        t0 = self.time()
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0
        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
        if dt >= self.slow_callback_duration:
            # Resolution was slow: surface it at INFO level instead.
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
return await self.run_in_executor(
None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
    async def _sock_sendfile_fallback(self, sock, file, offset, count):
        # Pure-Python fallback for sock_sendfile(): read the file in chunks
        # via the default executor and send each chunk with sock_sendall().
        if offset:
            file.seek(offset)
        blocksize = (
            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
        )
        buf = bytearray(blocksize)
        total_sent = 0
        try:
            while True:
                if count:
                    # Never read/send more than the remaining requested bytes.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    break  # EOF
                await self.sock_sendall(sock, view[:read])
                total_sent += read
            return total_sent
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                # Leave the file positioned just past the data actually sent,
                # even on error, so the caller can use file.tell().
                file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
    async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
        """Create, bind and connect one socket."""
        # Errors from this attempt are collected in *exceptions* (a list of
        # per-attempt lists) so create_connection() can report all of them.
        my_exceptions = []
        exceptions.append(my_exceptions)
        family, type_, proto, _, address = addr_info
        sock = None
        try:
            sock = socket.socket(family=family, type=type_, proto=proto)
            sock.setblocking(False)
            if local_addr_infos is not None:
                # Try each resolved local address until one bind succeeds.
                for _, _, _, _, laddr in local_addr_infos:
                    try:
                        sock.bind(laddr)
                        break
                    except OSError as exc:
                        # Re-wrap with a message naming the failing address.
                        msg = (
                            f'error while attempting to bind on '
                            f'address {laddr!r}: '
                            f'{exc.strerror.lower()}'
                        )
                        exc = OSError(exc.errno, msg)
                        my_exceptions.append(exc)
                else:  # all bind attempts failed
                    raise my_exceptions.pop()
            await self.sock_connect(sock, address)
            return sock
        except OSError as exc:
            my_exceptions.append(exc)
            if sock is not None:
                sock.close()
            raise
        except:
            # Any other error (including cancellation): close the socket
            # but do not record it as a connection failure.
            if sock is not None:
                sock.close()
            raise
    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0,
            proto=0, flags=0, sock=None,
            local_addr=None, server_hostname=None,
            ssl_handshake_timeout=None,
            happy_eyeballs_delay=None, interleave=None):
        """Connect to a TCP server.

        Create a streaming transport connection to a given internet host and
        port: socket family AF_INET or socket.AF_INET6 depending on host (or
        family if specified), socket type SOCK_STREAM. protocol_factory must
        be a callable returning a protocol instance.

        This method is a coroutine which will try to establish the connection
        in the background.  When successful, the coroutine returns a
        (transport, protocol) pair.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')
        if server_hostname is None and ssl:
            # Use host as default for server_hostname.  It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given.  To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check.  (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if happy_eyeballs_delay is not None and interleave is None:
            # If using happy eyeballs, default to interleave addresses by family
            interleave = 1
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            # Resolve the remote (and, optionally, local) addresses.
            infos = await self._ensure_resolved(
                (host, port), family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if local_addr is not None:
                laddr_infos = await self._ensure_resolved(
                    local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto,
                    flags=flags, loop=self)
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            else:
                laddr_infos = None
            if interleave:
                infos = _interleave_addrinfos(infos, interleave)
            exceptions = []
            if happy_eyeballs_delay is None:
                # not using happy eyeballs
                for addrinfo in infos:
                    try:
                        sock = await self._connect_sock(
                            exceptions, addrinfo, laddr_infos)
                        break
                    except OSError:
                        continue
            else:  # using happy eyeballs
                sock, _, _ = await staggered.staggered_race(
                    (functools.partial(self._connect_sock,
                                       exceptions, addrinfo, laddr_infos)
                     for addrinfo in infos),
                    happy_eyeballs_delay, loop=self)
            if sock is None:
                # Every candidate address failed: flatten the per-attempt
                # error lists collected by _connect_sock().
                exceptions = [exc for sub in exceptions for exc in sub]
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))
        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if sock.type != socket.SOCK_STREAM:
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method, breaks backwards
                # compatibility.
                raise ValueError(
                    f'A Stream Socket was expected, got {sock!r}')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
    async def _create_connection_transport(
            self, sock, protocol_factory, ssl,
            server_hostname, server_side=False,
            ssl_handshake_timeout=None):
        # Wrap an already-connected socket in a (transport, protocol) pair,
        # optionally upgrading to TLS, and wait for connection_made().
        sock.setblocking(False)
        protocol = protocol_factory()
        waiter = self.create_future()
        if ssl:
            # ssl=True means "use a default SSL context".
            sslcontext = None if isinstance(ssl, bool) else ssl
            transport = self._make_ssl_transport(
                sock, protocol, sslcontext, waiter,
                server_side=server_side, server_hostname=server_hostname,
                ssl_handshake_timeout=ssl_handshake_timeout)
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)
        try:
            # The waiter completes once the transport is fully set up
            # (and, for SSL, once the handshake finished).
            await waiter
        except:
            transport.close()
            raise
        return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
"""Send a file to transport.
Return the total number of bytes which were sent.
The method uses high-performance os.sendfile if available.
file must be a regular file object opened in binary mode.
offset tells from where to start reading the file. If specified,
count is the total number of bytes to transmit as opposed to
sending the file until EOF is reached. File position is updated on
return or also in case of error in which case file.tell()
can be used to figure out the number of bytes
which were sent.
fallback set to True makes asyncio to manually read and send
the file when the platform does not support the sendfile syscall
(e.g. Windows or SSL socket on Unix).
Raise SendfileNotAvailableError if the system does not support
sendfile syscall and fallback is False.
"""
if transport.is_closing():
raise RuntimeError("Transport is closing")
mode = getattr(transport, '_sendfile_compatible',
constants._SendfileMode.UNSUPPORTED)
if mode is constants._SendfileMode.UNSUPPORTED:
raise RuntimeError(
f"sendfile is not supported for transport {transport!r}")
if mode is constants._SendfileMode.TRY_NATIVE:
try:
return await self._sendfile_native(transport, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
if not fallback:
raise RuntimeError(
f"fallback is disabled and native sendfile is not "
f"supported for transport {transport!r}")
return await self._sendfile_fallback(transport, file,
offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
    async def _sendfile_fallback(self, transp, file, offset, count):
        # Portable implementation of sendfile(): read 16 KiB chunks via the
        # default executor and write them to the transport, honoring the
        # transport's flow control through _SendfileFallbackProtocol.
        if offset:
            file.seek(offset)
        blocksize = min(count, 16384) if count else 16384
        buf = bytearray(blocksize)
        total_sent = 0
        proto = _SendfileFallbackProtocol(transp)
        try:
            while True:
                if count:
                    # Never read/send more than the remaining requested bytes.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        return total_sent
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    return total_sent  # EOF
                # Wait until the transport is ready for more data.
                await proto.drain()
                transp.write(view[:read])
                total_sent += read
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                # Leave the file positioned just past the data actually sent.
                file.seek(offset + total_sent)
            # Restore the transport's original protocol and reading state.
            await proto.restore()
    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade transport to TLS.

        Return a new transport that *protocol* should start using
        immediately.
        """
        if ssl is None:
            # The ssl module failed to import at module load time.
            raise RuntimeError('Python ssl module is not available')
        if not isinstance(sslcontext, ssl.SSLContext):
            raise TypeError(
                f'sslcontext is expected to be an instance of ssl.SSLContext, '
                f'got {sslcontext!r}')
        if not getattr(transport, '_start_tls_compatible', False):
            raise TypeError(
                f'transport {transport!r} is not supported by start_tls()')
        waiter = self.create_future()
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            call_connection_made=False)
        # Pause early so that "ssl_protocol.data_received()" doesn't
        # have a chance to get called before "ssl_protocol.connection_made()".
        transport.pause_reading()
        transport.set_protocol(ssl_protocol)
        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
        resume_cb = self.call_soon(transport.resume_reading)
        try:
            # The waiter completes when the TLS handshake is done.
            await waiter
        except BaseException:
            # Handshake failed: tear down the transport and cancel the
            # setup callbacks that may not have run yet.
            transport.close()
            conmade_cb.cancel()
            resume_cb.cancel()
            raise
        return ssl_protocol._app_transport
    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=_unset, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create datagram connection.

        Return a (transport, protocol) pair, either wrapping a
        caller-supplied datagram *sock* or creating a new socket that is
        bound to *local_addr* and/or connected to *remote_addr*.
        """
        if sock is not None:
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                # show the problematic kwargs in exception msg
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                # UNIX datagram sockets address endpoints by filesystem path.
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')
                if local_addr and local_addr[0] not in (0, '\x00'):
                    try:
                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):
                            # Remove a stale socket file from a previous run.
                            os.remove(local_addr)
                    except FileNotFoundError:
                        pass
                    except OSError as err:
                        # Directory may have permissions only to create socket.
                        logger.error('Unable to check or remove stale UNIX '
                                     'socket %r: %r',
                                     local_addr, err)
                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # join address by (family, protocol)
                addr_infos = {}  # Using order preserving dict
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        assert isinstance(addr, tuple) and len(addr) == 2, (
                            '2-tuple is expected')
                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')
                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address
                # each addr has to have info for each (family, proto) pair
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]
                if not addr_pairs_info:
                    raise ValueError('can not get address information')
            exceptions = []
            # bpo-37228
            if reuse_address is not _unset:
                if reuse_address:
                    raise ValueError("Passing `reuse_address=True` is no "
                                     "longer supported, as the usage of "
                                     "SO_REUSEPORT in UDP poses a significant "
                                     "security concern.")
                else:
                    warnings.warn("The *reuse_address* parameter has been "
                                  "deprecated as of 3.5.10 and is scheduled "
                                  "for removal in 3.11.", DeprecationWarning,
                                  stacklevel=2)
            # Try each candidate (family, proto)/address pair until one
            # socket can be created, bound and (optionally) connected.
            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)
                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        if not allow_broadcast:
                            await self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    # Remember the error and try the next candidate.
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                # All candidates failed; report the first error.
                raise exceptions[0]
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)
        try:
            # Wait until the transport is fully set up.
            await waiter
        except:
            transport.close()
            raise
        return transport, protocol
    async def _ensure_resolved(self, address, *,
                               family=0, type=socket.SOCK_STREAM,
                               proto=0, flags=0, loop):
        # Return a getaddrinfo()-style list for *address*, skipping the
        # resolver entirely when the host is already a numeric IP address.
        host, port = address[:2]
        info = _ipaddr_info(host, port, family, type, proto, *address[2:])
        if info is not None:
            # "host" is already a resolved IP.
            return [info]
        else:
            return await loop.getaddrinfo(host, port, family=family, type=type,
                                          proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
    async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a TCP server.

        The host parameter can be a string, in that case the TCP server is
        bound to host and port.

        The host parameter can also be a sequence of strings and in that
        case the TCP server is bound to all hosts of the sequence.  If a
        host appears multiple times (possibly indirectly e.g. when
        hostnames resolve to the same IP address), the server is only
        bound once to that host.

        Return a Server object which can be used to stop the service.

        This method is a coroutine.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            if reuse_address is None:
                # SO_REUSEADDR is safe and conventional on POSIX (but not
                # on Windows/cygwin, where it has different semantics).
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host
            # Resolve all hosts concurrently.
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks.gather(*fs)
            # De-duplicate addresses so each is bound at most once.
            infos = set(itertools.chain.from_iterable(infos))
            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                if not completed:
                    # Binding failed part-way: close every socket we opened.
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]
        for sock in sockets:
            sock.setblocking(False)
        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0)
        if self._debug:
            logger.info("%r is serving", server)
        return server
    async def connect_accepted_socket(
            self, protocol_factory, sock,
            *, ssl=None,
            ssl_handshake_timeout=None):
        """Wrap an already-accepted SOCK_STREAM socket.

        Create a server-side (transport, protocol) pair around *sock*,
        optionally upgrading it to TLS, and return the pair.
        """
        if sock.type != socket.SOCK_STREAM:
            raise ValueError(f'A Stream Socket was expected, got {sock!r}')
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, '', server_side=True,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
        return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
    async def subprocess_shell(self, protocol_factory, cmd, *,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=False,
                               shell=True, bufsize=0,
                               encoding=None, errors=None, text=None,
                               **kwargs):
        """Run *cmd* through the shell; return (transport, protocol).

        The text-mode subprocess.Popen options are accepted only with
        their binary-mode values: the transport always works with bytes.
        """
        if not isinstance(cmd, (bytes, str)):
            raise ValueError("cmd must be a string")
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if not shell:
            raise ValueError("shell must be True")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log parameters: they may contain sensitive information
            # (password) and may be too long
            debug_log = 'run shell command %r' % cmd
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
    async def subprocess_exec(self, protocol_factory, program, *args,
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, universal_newlines=False,
                              shell=False, bufsize=0,
                              encoding=None, errors=None, text=None,
                              **kwargs):
        """Execute *program* with *args*; return (transport, protocol).

        The text-mode subprocess.Popen options are accepted only with
        their binary-mode values: the transport always works with bytes.
        """
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if shell:
            raise ValueError("shell must be False")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        popen_args = (program,) + args
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log parameters: they may contain sensitive information
            # (password) and may be too long
            debug_log = f'execute program {program!r}'
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, popen_args, False, stdin, stdout, stderr,
            bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
    def default_exception_handler(self, context):
        """Default exception handler.

        This is called when an exception occurs and no exception
        handler is set, and can be called by a custom exception
        handler that wants to defer to the default behavior.

        This default handler logs the error message and other
        context-dependent information.  In debug mode, a truncated
        stack trace is also appended showing where the given object
        (e.g. a handle or future or task) was created, if any.

        The context parameter has the same meaning as in
        `call_exception_handler()`.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'
        exception = context.get('exception')
        if exception is not None:
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False
        if ('source_traceback' not in context and
                self._current_handle is not None and
                self._current_handle._source_traceback):
            # Record where the currently-running handle was created.
            context['handle_traceback'] = \
                self._current_handle._source_traceback
        log_lines = [message]
        for key in sorted(context):
            # 'message' and 'exception' are rendered separately above.
            if key in {'message', 'exception'}:
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append(f'{key}: {value}')
        logger.error('\n'.join(log_lines), exc_info=exc_info)
    def call_exception_handler(self, context):
        """Call the current event loop's exception handler.

        The context argument is a dict containing the following keys:

        - 'message': Error message;
        - 'exception' (optional): Exception object;
        - 'future' (optional): Future instance;
        - 'task' (optional): Task instance;
        - 'handle' (optional): Handle instance;
        - 'protocol' (optional): Protocol instance;
        - 'transport' (optional): Transport instance;
        - 'socket' (optional): Socket instance;
        - 'asyncgen' (optional): Asynchronous generator that caused
          the exception.

        New keys maybe introduced in the future.

        Note: do not overload this method in an event loop subclass.
        For custom exception handling, use the
        `set_exception_handler()` method.
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except (SystemExit, KeyboardInterrupt):
                    raise
                except BaseException:
                    # Guard 'default_exception_handler' in case it is
                    # overloaded.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
    def _add_callback_signalsafe(self, handle):
        """Like _add_callback() but called from a signal handler."""
        self._add_callback(handle)
        # Write to the self-pipe so a loop blocked in select() wakes up
        # and notices the newly queued handle.
        self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
    def _run_once(self):
        """Run one full iteration of the event loop.

        This calls all currently ready callbacks, polls for I/O,
        schedules the resulting callbacks, and finally schedules
        'call_later' callbacks.
        """
        # Compact the timer heap when the fraction of cancelled timers
        # gets too large; otherwise just pop cancelled timers off the head.
        sched_count = len(self._scheduled)
        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
                self._timer_cancelled_count / sched_count >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
            # Remove delayed calls that were cancelled if their number
            # is too high
            new_scheduled = []
            for handle in self._scheduled:
                if handle._cancelled:
                    handle._scheduled = False
                else:
                    new_scheduled.append(handle)
            heapq.heapify(new_scheduled)
            self._scheduled = new_scheduled
            self._timer_cancelled_count = 0
        else:
            # Remove delayed calls that were cancelled from head of queue.
            while self._scheduled and self._scheduled[0]._cancelled:
                self._timer_cancelled_count -= 1
                handle = heapq.heappop(self._scheduled)
                handle._scheduled = False

        # timeout=None blocks indefinitely in select(); 0 means poll only.
        timeout = None
        if self._ready or self._stopping:
            timeout = 0
        elif self._scheduled:
            # Compute the desired timeout.
            when = self._scheduled[0]._when
            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)

        event_list = self._selector.select(timeout)
        self._process_events(event_list)

        # Handle 'later' callbacks that are ready.
        # end_time includes the clock resolution so timers due "now"
        # are not left waiting one extra iteration.
        end_time = self.time() + self._clock_resolution
        while self._scheduled:
            handle = self._scheduled[0]
            if handle._when >= end_time:
                break
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
            self._ready.append(handle)

        # This is the only place where callbacks are actually *called*.
        # All other places just add them to ready.
        # Note: We run all currently scheduled callbacks, but not any
        # callbacks scheduled by callbacks run this time around --
        # they will be run the next time (after another I/O poll).
        # Use an idiom that is thread-safe without using locks.
        ntodo = len(self._ready)
        for i in range(ntodo):
            handle = self._ready.popleft()
            if handle._cancelled:
                continue
            if self._debug:
                try:
                    # Track the handle so debug dumps can show what runs.
                    self._current_handle = handle
                    t0 = self.time()
                    handle._run()
                    dt = self.time() - t0
                    # Warn about callbacks that block the loop too long.
                    if dt >= self.slow_callback_duration:
                        logger.warning('Executing %s took %.3f seconds',
                                       _format_handle(handle), dt)
                finally:
                    self._current_handle = None
            else:
                handle._run()
        handle = None  # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
| 38.516518 | 83 | 0.573689 |
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
_MIN_SCHEDULED_TIMER_HANDLES = 100
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Sentinel default for the *reuse_address* parameter of
# create_datagram_endpoint(): distinguishes "argument not passed"
# from an explicit None.
_unset = object()
def _format_handle(handle):
    """Return a log-friendly description of *handle*.

    Callbacks bound to a Task are rendered as the task's repr, which is
    more informative than the raw handle.
    """
    cb = handle._callback
    bound_self = getattr(cb, '__self__', None)
    if isinstance(bound_self, tasks.Task):
        # format the task
        return repr(bound_self)
    return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
    """Build an addrinfo 5-tuple without calling getaddrinfo() when *host*
    is already a numeric IP literal.

    Returns None whenever a real getaddrinfo() lookup is required
    (hostnames, service-name ports, unsupported type/proto combinations).
    """
    # Try to skip getaddrinfo if "host" is already an IP. Users might have
    # handled name resolution in their own code and pass in resolved IPs.
    if not hasattr(socket, 'inet_pton'):
        return

    if host is None or proto not in {0, socket.IPPROTO_TCP,
                                     socket.IPPROTO_UDP}:
        return None

    # Only stream/datagram sockets map onto a well-known protocol.
    if type == socket.SOCK_STREAM:
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        return None

    # Normalize the port to an int; a service name like "http" means the
    # caller really needs getaddrinfo().
    if port is None or (isinstance(port, (str, bytes)) and not port):
        port = 0
    else:
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None

    if family == socket.AF_UNSPEC:
        candidate_afs = [socket.AF_INET]
        if _HAS_IPv6:
            candidate_afs.append(socket.AF_INET6)
    else:
        candidate_afs = [family]

    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
        # like '::1%lo0'.
        return None

    for af in candidate_afs:
        try:
            socket.inet_pton(af, host)
        except OSError:
            continue
        # The host has already been resolved.
        if _HAS_IPv6 and af == socket.AF_INET6:
            return af, type, proto, '', (host, port, flowinfo, scopeid)
        return af, type, proto, '', (host, port)

    # "host" is not an IP address.
    return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
def _run_until_complete_cb(fut):
    # Done-callback attached by run_until_complete(): stops the loop once
    # the future finishes so run_forever() can return.
    if not fut.cancelled():
        exc = fut.exception()
        if isinstance(exc, (SystemExit, KeyboardInterrupt)):
            # Issue #22429: run_forever() already finished, no need to
            # stop it.
            return
    futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
    def _set_nodelay(sock):
        # Disable Nagle's algorithm, but only on TCP stream sockets;
        # other families/types are left untouched.
        if (sock.family in {socket.AF_INET, socket.AF_INET6} and
                sock.type == socket.SOCK_STREAM and
                sock.proto == socket.IPPROTO_TCP):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
    def _set_nodelay(sock):
        # Platform has no TCP_NODELAY; make the helper a no-op.
        pass
class _SendfileFallbackProtocol(protocols.Protocol):
    """Temporary protocol installed on a transport while the pure-Python
    sendfile() fallback feeds it data.

    It pauses reading, turns unexpected protocol callbacks into errors,
    and exposes drain() so the sender can respect the transport's flow
    control; restore() reinstalls the original protocol afterwards.
    """

    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        self._proto = transp.get_protocol()
        # Remember the reading/writing state so restore() can put it back.
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            # Transport is already flow-control paused: create the future
            # that drain() will await until resume_writing() fires.
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None

    async def drain(self):
        """Block until the transport is ready to accept more data."""
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut

    def connection_made(self, transport):
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")

    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        self._proto.connection_lost(exc)

    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()

    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None

    def data_received(self, data):
        raise RuntimeError("Invalid state: reading should be paused")

    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")

    async def restore(self):
        """Reinstall the original protocol and its reading state."""
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
class Server(events.AbstractServer):
    """A listening server as returned by loop.create_server().

    Owns the listening sockets, counts active connections via
    _attach()/_detach(), and wakes wait_closed() waiters once the server
    is closed and its last connection has finished.
    """

    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout):
        self._loop = loop
        # Listening sockets; set to None by close().
        self._sockets = sockets
        # Number of transports currently attached (live connections).
        self._active_count = 0
        # Futures created by wait_closed().
        self._waiters = []
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._serving = False
        # Future awaited by serve_forever(); also guards double invocation.
        self._serving_forever_fut = None

    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'

    def _attach(self):
        # Called by an accepted transport to register itself.
        assert self._sockets is not None
        self._active_count += 1

    def _detach(self):
        # Called when a transport closes; wakes waiters once the last
        # connection finishes on an already-closed server.
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self._sockets is None:
            self._wakeup()

    def _wakeup(self):
        # Resolve every wait_closed() future exactly once.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    def _start_serving(self):
        # Start listening on all sockets; idempotent.
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout)

    def get_loop(self):
        """Return the event loop this server runs on."""
        return self._loop

    def is_serving(self):
        """Return True if the server is accepting new connections."""
        return self._serving

    @property
    def sockets(self):
        # Read-only wrappers over the listening sockets; empty once closed.
        if self._sockets is None:
            return ()
        return tuple(trsock.TransportSocket(s) for s in self._sockets)

    def close(self):
        """Stop listening; existing connections keep running."""
        sockets = self._sockets
        if sockets is None:
            return
        self._sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        self._serving = False
        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            # Unblock a pending serve_forever() call.
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None
        if self._active_count == 0:
            self._wakeup()

    async def start_serving(self):
        """Start accepting connections (no-op if already serving)."""
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # go through.
        await tasks.sleep(0)

    async def serve_forever(self):
        """Start serving and block until the server is closed or cancelled."""
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')
        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()
        try:
            await self._serving_forever_fut
        except exceptions.CancelledError:
            # On cancellation, shut the server down cleanly before
            # re-raising to the caller.
            try:
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None

    async def wait_closed(self):
        """Wait until the server is closed and all connections are done."""
        if self._sockets is None or self._waiters is None:
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug(coroutines._is_debug_mode())
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_origin_tracking_enabled = False
self._coroutine_origin_tracking_saved_depth = None
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
# Set to True when `loop.shutdown_default_executor` is called.
self._executor_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
def create_future(self):
return futures.Future(loop=self)
def create_task(self, coro, *, name=None):
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self, name=name)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
tasks._set_task_name(task, name)
return task
def set_task_factory(self, factory):
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
def get_task_factory(self):
return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
raise NotImplementedError
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
call_connection_made=True):
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
raise NotImplementedError
def _write_to_self(self):
raise NotImplementedError
def _process_events(self, event_list):
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _check_default_executor(self):
if self._executor_shutdown_called:
raise RuntimeError('Executor shutdown has been called')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
f"asynchronous generator {agen!r} was scheduled after "
f"loop.shutdown_asyncgens() call",
ResourceWarning, source=self)
self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
self._asyncgens_shutdown_called = True
if not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
results = await tasks.gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True)
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': f'an error occurred during closing of '
f'asynchronous generator {agen!r}',
'exception': result,
'asyncgen': agen
})
async def shutdown_default_executor(self):
self._executor_shutdown_called = True
if self._default_executor is None:
return
future = self.create_future()
thread = threading.Thread(target=self._do_shutdown, args=(future,))
thread.start()
try:
await future
finally:
thread.join()
def _do_shutdown(self, future):
try:
self._default_executor.shutdown(wait=True)
self.call_soon_threadsafe(future.set_result, None)
except Exception as ex:
self.call_soon_threadsafe(future.set_exception, ex)
def _check_running(self):
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
def run_forever(self):
self._check_closed()
self._check_running()
self._set_coroutine_origin_tracking(self._debug)
self._thread_id = threading.get_ident()
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_origin_tracking(False)
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
self._check_closed()
self._check_running()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
self._stopping = True
def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
self._executor_shutdown_called = True
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
return self._closed
def __del__(self, _warn=warnings.warn):
if not self.is_closed():
_warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
if not self.is_running():
self.close()
def is_running(self):
return (self._thread_id is not None)
def time(self):
return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
timer = self.call_at(self.time() + delay, callback, *args,
context=context)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
def call_at(self, when, callback, *args, context=None):
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self, context)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
timer._scheduled = True
return timer
def call_soon(self, callback, *args, context=None):
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
def _call_soon(self, callback, args, context):
handle = events.Handle(callback, args, self, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
def run_in_executor(self, executor, func, *args):
self._check_closed()
if self._debug:
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
self._check_default_executor()
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor(
thread_name_prefix='asyncio'
)
self._default_executor = executor
return futures.wrap_future(
executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
warnings.warn(
'Using the default executor that is not an instance of '
'ThreadPoolExecutor is deprecated and will be prohibited '
'in Python 3.9',
DeprecationWarning, 2)
self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
msg = [f"{host}:{port!r}"]
if family:
msg.append(f'family={family!r}')
if type:
msg.append(f'type={type!r}')
if proto:
msg.append(f'proto={proto!r}')
if flags:
msg.append(f'flags={flags!r}')
msg = ', '.join(msg)
logger.debug('Get address info %s', msg)
t0 = self.time()
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
dt = self.time() - t0
msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
if dt >= self.slow_callback_duration:
logger.info(msg)
else:
logger.debug(msg)
return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
return await self.run_in_executor(
None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
if offset:
file.seek(offset)
blocksize = (
min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
)
buf = bytearray(blocksize)
total_sent = 0
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
break
await self.sock_sendall(sock, view[:read])
total_sent += read
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
my_exceptions = []
exceptions.append(my_exceptions)
family, type_, proto, _, address = addr_info
sock = None
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
if local_addr_infos is not None:
for _, _, _, _, laddr in local_addr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
msg = (
f'error while attempting to bind on '
f'address {laddr!r}: '
f'{exc.strerror.lower()}'
)
exc = OSError(exc.errno, msg)
my_exceptions.append(exc)
else:
raise my_exceptions.pop()
await self.sock_connect(sock, address)
return sock
except OSError as exc:
my_exceptions.append(exc)
if sock is not None:
sock.close()
raise
except:
if sock is not None:
sock.close()
raise
async def create_connection(
self, protocol_factory, host=None, port=None,
*, ssl=None, family=0,
proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None,
ssl_handshake_timeout=None,
happy_eyeballs_delay=None, interleave=None):
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if happy_eyeballs_delay is not None and interleave is None:
# If using happy eyeballs, default to interleave addresses by family
interleave = 1
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
infos = await self._ensure_resolved(
(host, port), family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
if local_addr is not None:
laddr_infos = await self._ensure_resolved(
local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto,
flags=flags, loop=self)
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
else:
laddr_infos = None
if interleave:
infos = _interleave_addrinfos(infos, interleave)
exceptions = []
if happy_eyeballs_delay is None:
# not using happy eyeballs
for addrinfo in infos:
try:
sock = await self._connect_sock(
exceptions, addrinfo, laddr_infos)
break
except OSError:
continue
else: # using happy eyeballs
sock, _, _ = await staggered.staggered_race(
(functools.partial(self._connect_sock,
exceptions, addrinfo, laddr_infos)
for addrinfo in infos),
happy_eyeballs_delay, loop=self)
if sock is None:
exceptions = [exc for sub in exceptions for exc in sub]
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
else:
if sock is None:
raise ValueError(
'host and port was not specified and no sock specified')
if sock.type != socket.SOCK_STREAM:
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
# are SOCK_STREAM.
# We support passing AF_UNIX sockets even though we have
# a dedicated API for that: create_unix_connection.
# Disallowing AF_UNIX in this method, breaks backwards
# compatibility.
raise ValueError(
f'A Stream Socket was expected, got {sock!r}')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
return transport, protocol
async def _create_connection_transport(
self, sock, protocol_factory, ssl,
server_hostname, server_side=False,
ssl_handshake_timeout=None):
sock.setblocking(False)
protocol = protocol_factory()
waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
if transport.is_closing():
raise RuntimeError("Transport is closing")
mode = getattr(transport, '_sendfile_compatible',
constants._SendfileMode.UNSUPPORTED)
if mode is constants._SendfileMode.UNSUPPORTED:
raise RuntimeError(
f"sendfile is not supported for transport {transport!r}")
if mode is constants._SendfileMode.TRY_NATIVE:
try:
return await self._sendfile_native(transport, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
if not fallback:
raise RuntimeError(
f"fallback is disabled and native sendfile is not "
f"supported for transport {transport!r}")
return await self._sendfile_fallback(transport, file,
offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
async def _sendfile_fallback(self, transp, file, offset, count):
if offset:
file.seek(offset)
blocksize = min(count, 16384) if count else 16384
buf = bytearray(blocksize)
total_sent = 0
proto = _SendfileFallbackProtocol(transp)
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
return total_sent
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
return total_sent # EOF
await proto.drain()
transp.write(view[:read])
total_sent += read
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
await proto.restore()
async def start_tls(self, transport, protocol, sslcontext, *,
server_side=False,
server_hostname=None,
ssl_handshake_timeout=None):
if ssl is None:
raise RuntimeError('Python ssl module is not available')
if not isinstance(sslcontext, ssl.SSLContext):
raise TypeError(
f'sslcontext is expected to be an instance of ssl.SSLContext, '
f'got {sslcontext!r}')
if not getattr(transport, '_start_tls_compatible', False):
raise TypeError(
f'transport {transport!r} is not supported by start_tls()')
waiter = self.create_future()
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
call_connection_made=False)
# Pause early so that "ssl_protocol.data_received()" doesn't
transport.pause_reading()
transport.set_protocol(ssl_protocol)
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
resume_cb = self.call_soon(transport.resume_reading)
try:
await waiter
except BaseException:
transport.close()
conmade_cb.cancel()
resume_cb.cancel()
raise
return ssl_protocol._app_transport
    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=_unset, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create a UDP endpoint; returns (transport, protocol).

        Either pass a pre-configured *sock* (exclusive with every other
        socket option), or addresses/options for a socket created here.
        Candidate (family, proto)/address pairs are tried in order until
        one binds/connects successfully.
        """
        if sock is not None:
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            # A caller-supplied socket is mutually exclusive with every
            # socket-configuration keyword.
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                # Unix-domain datagram sockets: addresses are path strings.
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')
                # Remove a stale socket file left over from a previous run;
                # abstract-namespace addresses (leading NUL) have no file.
                if local_addr and local_addr[0] not in (0, '\x00'):
                    try:
                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):
                            os.remove(local_addr)
                    except FileNotFoundError:
                        pass
                    except OSError as err:
                        # Directory may be not readable and so on.
                        logger.error('Unable to check or remove stale UNIX '
                                     'socket %r: %r',
                                     local_addr, err)
                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # IP sockets: resolve both addresses and pair up results
                # that share the same (family, proto).
                addr_infos = {}
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        assert isinstance(addr, tuple) and len(addr) == 2, (
                            '2-tuple is expected')
                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')
                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address
                # Keep only pairs where every requested side resolved.
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]
                if not addr_pairs_info:
                    raise ValueError('can not get address information')
            # NOTE: this local shadows the asyncio `exceptions` module
            # inside this method (the module is not used below).
            exceptions = []
            if reuse_address is not _unset:
                if reuse_address:
                    raise ValueError("Passing `reuse_address=True` is no "
                                     "longer supported, as the usage of "
                                     "SO_REUSEPORT in UDP poses a significant "
                                     "security concern.")
                else:
                    warnings.warn("The *reuse_address* parameter has been "
                                  "deprecated as of 3.5.10 and is scheduled "
                                  "for removal in 3.11.", DeprecationWarning,
                                  stacklevel=2)
            # Try each candidate until one binds/connects; collect
            # failures so the first can be re-raised if all fail.
            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)
                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        if not allow_broadcast:
                            await self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                raise exceptions[0]
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)
        try:
            # Wait until the transport reports ready (or failed).
            await waiter
        except:
            transport.close()
            raise
        return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
    async def _create_server_getaddrinfo(self, host, port, family, flags):
        """Resolve (host, port) for create_server(); raise if empty."""
        infos = await self._ensure_resolved((host, port), family=family,
                                            type=socket.SOCK_STREAM,
                                            flags=flags, loop=self)
        if not infos:
            raise OSError(f'getaddrinfo({host!r}) returned empty list')
        return infos
    async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a TCP server; returns a Server object.

        Listens either on every address that (host, port) resolves to
        (*host* may be a string, None, '' or an iterable of hosts) or on
        a pre-bound *sock*.  With *start_serving* true, the server begins
        accepting connections before this coroutine returns.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            if reuse_address is None:
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                    not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host
            # Resolve all hosts concurrently.
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks.gather(*fs)
            infos = set(itertools.chain.from_iterable(infos))
            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                # On any failure above, close every socket opened so far.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]
        for sock in sockets:
            sock.setblocking(False)
        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0)
        if self._debug:
            logger.info("%r is serving", server)
        return server
    async def connect_accepted_socket(
            self, protocol_factory, sock,
            *, ssl=None,
            ssl_handshake_timeout=None):
        """Wrap an already-accepted connection *sock* in asyncio.

        Returns (transport, protocol); intended for servers that accept
        connections outside of asyncio and hand them over to the loop.
        """
        if sock.type != socket.SOCK_STREAM:
            raise ValueError(f'A Stream Socket was expected, got {sock!r}')
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, '', server_side=True,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
        return transport, protocol
    async def connect_read_pipe(self, protocol_factory, pipe):
        """Wrap a readable *pipe* object; returns (transport, protocol)."""
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_read_pipe_transport(pipe, protocol, waiter)
        try:
            # Wait until the transport reports ready (or failed).
            await waiter
        except:
            transport.close()
            raise
        if self._debug:
            logger.debug('Read pipe %r connected: (%r, %r)',
                         pipe.fileno(), transport, protocol)
        return transport, protocol
    async def connect_write_pipe(self, protocol_factory, pipe):
        """Wrap a writable *pipe* object; returns (transport, protocol)."""
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_write_pipe_transport(pipe, protocol, waiter)
        try:
            # Wait until the transport reports ready (or failed).
            await waiter
        except:
            transport.close()
            raise
        if self._debug:
            logger.debug('Write pipe %r connected: (%r, %r)',
                         pipe.fileno(), transport, protocol)
        return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
    async def subprocess_shell(self, protocol_factory, cmd, *,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=False,
                               shell=True, bufsize=0,
                               encoding=None, errors=None, text=None,
                               **kwargs):
        """Run *cmd* through the shell; returns (transport, protocol).

        The text-mode parameters exist only for signature symmetry with
        subprocess.Popen and must keep their binary-mode defaults.
        """
        if not isinstance(cmd, (bytes, str)):
            raise ValueError("cmd must be a string")
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if not shell:
            raise ValueError("shell must be True")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log parameters: they may contain sensitive information
            debug_log = 'run shell command %r' % cmd
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
    async def subprocess_exec(self, protocol_factory, program, *args,
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, universal_newlines=False,
                              shell=False, bufsize=0,
                              encoding=None, errors=None, text=None,
                              **kwargs):
        """Run *program* with *args* (no shell); returns (transport, protocol).

        The text-mode parameters exist only for signature symmetry with
        subprocess.Popen and must keep their binary-mode defaults.
        """
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if shell:
            raise ValueError("shell must be False")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        popen_args = (program,) + args
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log the arguments: they may contain sensitive information
            # (password) and may be too long
            debug_log = f'execute program {program!r}'
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, popen_args, False, stdin, stdout, stderr,
            bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
    def get_exception_handler(self):
        """Return the current exception handler (None means default)."""
        return self._exception_handler
def set_exception_handler(self, handler):
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
    def default_exception_handler(self, context):
        """Log the error described by *context* via the asyncio logger.

        *context* holds an optional "message", an optional "exception",
        and arbitrary extra keys, all of which are rendered into a
        multi-line error record.  Used when no custom handler is set and
        as the fallback when a custom handler itself fails.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'
        exception = context.get('exception')
        if exception is not None:
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False
        # If the caller didn't capture a creation traceback, attach the
        # traceback of the handle currently being executed (debug mode).
        if ('source_traceback' not in context and
                self._current_handle is not None and
                self._current_handle._source_traceback):
            context['handle_traceback'] = \
                self._current_handle._source_traceback
        log_lines = [message]
        for key in sorted(context):
            if key in {'message', 'exception'}:
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append(f'{key}: {value}')
        logger.error('\n'.join(log_lines), exc_info=exc_info)
    def call_exception_handler(self, context):
        """Dispatch *context* to the configured exception handler.

        Uses default_exception_handler() when no custom handler is set,
        and falls back to it when the custom handler raises.  Handler
        errors never escape (except SystemExit/KeyboardInterrupt).
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except (SystemExit, KeyboardInterrupt):
                    raise
                except BaseException:
                    # At this point all we can do is log.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
    def _add_callback(self, handle):
        """Append *handle* to the ready queue (unless already cancelled).

        Only plain Handles are accepted; TimerHandles must go through
        the scheduled heap instead.
        """
        assert isinstance(handle, events.Handle), 'A Handle is required here'
        if handle._cancelled:
            return
        assert not isinstance(handle, events.TimerHandle)
        self._ready.append(handle)
    def _add_callback_signalsafe(self, handle):
        """Like _add_callback() but also wakes up the selector."""
        self._add_callback(handle)
        self._write_to_self()
def _timer_handle_cancelled(self, handle):
if handle._scheduled:
self._timer_cancelled_count += 1
    def _run_once(self):
        """Run one full iteration of the event loop.

        Compacts the timer heap when too many cancelled timers have
        accumulated, polls the selector with an appropriate timeout,
        moves expired timers onto the ready queue, then runs every
        callback that was ready at the start of this pass.
        """
        sched_count = len(self._scheduled)
        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
            self._timer_cancelled_count / sched_count >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
            # Remove delayed calls that were cancelled if their number
            # is too high.
            new_scheduled = []
            for handle in self._scheduled:
                if handle._cancelled:
                    handle._scheduled = False
                else:
                    new_scheduled.append(handle)
            heapq.heapify(new_scheduled)
            self._scheduled = new_scheduled
            self._timer_cancelled_count = 0
        else:
            # Remove cancelled timers only from the head of the heap.
            while self._scheduled and self._scheduled[0]._cancelled:
                self._timer_cancelled_count -= 1
                handle = heapq.heappop(self._scheduled)
                handle._scheduled = False
        # Pick the selector timeout: 0 if work is already pending,
        # otherwise sleep until the earliest scheduled timer (capped).
        timeout = None
        if self._ready or self._stopping:
            timeout = 0
        elif self._scheduled:
            when = self._scheduled[0]._when
            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
        event_list = self._selector.select(timeout)
        self._process_events(event_list)
        # Move timers that are due (within clock resolution) to ready.
        end_time = self.time() + self._clock_resolution
        while self._scheduled:
            handle = self._scheduled[0]
            if handle._when >= end_time:
                break
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
            self._ready.append(handle)
        # Run all callbacks that are ready *now*; callbacks queued by
        # these callbacks run on the next iteration (snapshot via ntodo).
        ntodo = len(self._ready)
        for i in range(ntodo):
            handle = self._ready.popleft()
            if handle._cancelled:
                continue
            if self._debug:
                try:
                    self._current_handle = handle
                    t0 = self.time()
                    handle._run()
                    dt = self.time() - t0
                    if dt >= self.slow_callback_duration:
                        logger.warning('Executing %s took %.3f seconds',
                                       _format_handle(handle), dt)
                finally:
                    self._current_handle = None
            else:
                handle._run()
        handle = None  # drop the last reference to avoid keeping it alive
    def _set_coroutine_origin_tracking(self, enabled):
        """Toggle interpreter-wide coroutine creation-site tracking.

        Saves the previous tracking depth when enabling so that
        disabling restores whatever was configured before.
        """
        if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
            return
        if enabled:
            self._coroutine_origin_tracking_saved_depth = (
                sys.get_coroutine_origin_tracking_depth())
            sys.set_coroutine_origin_tracking_depth(
                constants.DEBUG_STACK_DEPTH)
        else:
            sys.set_coroutine_origin_tracking_depth(
                self._coroutine_origin_tracking_saved_depth)
        self._coroutine_origin_tracking_enabled = enabled
    def get_debug(self):
        """Return True if the event loop is in debug mode."""
        return self._debug
    def set_debug(self, enabled):
        """Enable/disable debug mode.

        When the loop is already running, coroutine origin tracking is
        switched from inside the loop (thread-safely).
        """
        self._debug = enabled
        if self.is_running():
            self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
| true | true |
f726e9f4ca4961a8ea29f9196c6fa380bedb6b8e | 1,865 | py | Python | test/unittest/committee_test.py | Cocos-BCX/Python-Middleware | 9e8db14cdbf12131964d48d1189e0686b69369a8 | [
"MIT"
] | 101 | 2019-07-24T08:30:30.000Z | 2021-02-18T15:11:59.000Z | test/unittest/committee_test.py | marcomgsilva/Python-Middleware | 9e8db14cdbf12131964d48d1189e0686b69369a8 | [
"MIT"
] | 4 | 2019-08-01T10:06:29.000Z | 2019-11-29T08:32:34.000Z | test/unittest/committee_test.py | marcomgsilva/Python-Middleware | 9e8db14cdbf12131964d48d1189e0686b69369a8 | [
"MIT"
] | 7 | 2019-08-11T16:02:41.000Z | 2021-02-11T04:23:51.000Z | import unittest
from config import Config
class CommitteeTestCase(unittest.TestCase):
def testCreateCommittee(self):
params = {
"url": " ",
"account": "1.2.25"
}
gph = Config().gph
try:
print("CreateCommittee:", gph.committee_member_create(**params))
except Exception as e:
print(repr(e))
def testUpdateCommittee(self):
params = {
"work_status": True,
"new_url": "www.1234.com",
"account": "1.2.25"
}
gph = Config().gph
try:
print("UpdateCommittee:", gph.committee_member_update(**params))
except Exception as e:
print(repr(e))
def testApproveCommittee(self):
params = {
"committees": ["testaccount7"],
"vote_type": 0,
"vote_amount": 10,
"vote_asset": "1.3.0",
"account": "1.2.16"
}
gph = Config().gph
try:
print("ApproveCommittee:", gph.approve_committee(**params))
except Exception as e:
print(repr(e))
def testDisApproveCommittee(self):
params = {
"committees": ["testaccount7"],
"vote_type": 0,
"vote_amount": 1,
"vote_asset": "1.3.0",
"account": "1.2.14"
}
gph = Config().gph
try:
print("DisApproveCommittee:", gph.disapprove_committee(**params))
except Exception as e:
print(repr(e))
if __name__ == "__main__":
    # Run one case at a time; uncomment the relevant pair to smoke-test
    # the other committee operations against a live node.
    # case1 = CommitteeTestCase("testCreateCommittee")
    # case1()
    # case2 = CommitteeTestCase("testUpdateCommittee")
    # case2()
    case3 = CommitteeTestCase("testApproveCommittee")
    case3()
    # case4 = CommitteeTestCase("testDisApproveCommittee")
    # case4()
from config import Config
class CommitteeTestCase(unittest.TestCase):
def testCreateCommittee(self):
params = {
"url": " ",
"account": "1.2.25"
}
gph = Config().gph
try:
print("CreateCommittee:", gph.committee_member_create(**params))
except Exception as e:
print(repr(e))
def testUpdateCommittee(self):
params = {
"work_status": True,
"new_url": "www.1234.com",
"account": "1.2.25"
}
gph = Config().gph
try:
print("UpdateCommittee:", gph.committee_member_update(**params))
except Exception as e:
print(repr(e))
def testApproveCommittee(self):
params = {
"committees": ["testaccount7"],
"vote_type": 0,
"vote_amount": 10,
"vote_asset": "1.3.0",
"account": "1.2.16"
}
gph = Config().gph
try:
print("ApproveCommittee:", gph.approve_committee(**params))
except Exception as e:
print(repr(e))
def testDisApproveCommittee(self):
params = {
"committees": ["testaccount7"],
"vote_type": 0,
"vote_amount": 1,
"vote_asset": "1.3.0",
"account": "1.2.14"
}
gph = Config().gph
try:
print("DisApproveCommittee:", gph.disapprove_committee(**params))
except Exception as e:
print(repr(e))
if __name__ == "__main__":
case3 = CommitteeTestCase("testApproveCommittee")
case3()
| true | true |
f726eaa4291a25a6faf61571bc3ad1b43a3541f2 | 4,011 | py | Python | PhysicsTools/Heppy/python/physicsutils/genutils.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | PhysicsTools/Heppy/python/physicsutils/genutils.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | PhysicsTools/Heppy/python/physicsutils/genutils.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | from PhysicsTools.Heppy.physicsobjects.PhysicsObjects import printOut
from PhysicsTools.Heppy.physicsobjects.PhysicsObjects import GenParticle
def findStatus1Leptons(particle):
'''Returns status 1 e and mu among the particle daughters'''
leptons = []
for i in range( particle.numberOfDaughters() ):
dau = particle.daughter(i)
if dau.status() == 1:
if abs(dau.pdgId())==11 or abs(dau.pdgId())==13:
leptons.append( dau )
else:
continue
else:
leptons = findStatus1Leptons( dau, leptons )
return leptons
def allDaughters(particle, daughters, rank):
    '''Recursively append every descendant of *particle* to *daughters*,
    wrapped as GenParticle and tagged with its generation depth in
    ``.rank`` (direct daughters get rank+1).  Returns *daughters*.
    '''
    rank += 1
    for idx in range(particle.numberOfDaughters()):
        child = GenParticle(particle.daughter(idx))
        child.rank = rank
        daughters.append(child)
        daughters = allDaughters(child, daughters, rank)
    return daughters
def bosonToX(particles, bosonType, xType):
    '''Find the first status-3 boson with pdgId *bosonType* in *particles*
    and return (status-3 daughters with |pdgId| == xType, True), or
    ([], False) when no such boson exists.

    Fixes vs. the original: list comprehensions replace ``filter`` with a
    lambda (``len(filter(...))`` fails on Python 3, where filter returns
    an iterator), and the unused ``daughters`` local is removed.
    '''
    bosons = [p for p in particles
              if p.status() == 3 and p.pdgId() == bosonType]
    if not bosons:
        return [], False
    daus = []
    allDaughters(bosons[0], daus, 0)
    xDaus = [d for d in daus
             if d.status() == 3 and abs(d.pdgId()) == xType]
    return xDaus, True
def isNotHadronicId(pdgId, includeSMLeptons=True):
    '''True if *pdgId* denotes a non-hadronic particle.

    SM leptons and neutrinos (|pdgId| 11-16) yield *includeSMLeptons*.
    Otherwise the id modulo 1000 (strips SUSY/BSM-style prefixes) must
    fall strictly between 10 and 100 and not be the gluon (21).
    '''
    absId = abs(pdgId)
    if absId in (11, 12, 13, 14, 15, 16):
        return includeSMLeptons
    reduced = absId % 1000
    return 10 < reduced < 100 and reduced != 21
def isPromptLepton(lepton, beforeFSR, includeMotherless=True, includeTauDecays=False):
    '''True if *lepton* (e/mu/tau) comes from the hard process rather
    than from a hadron decay.

    beforeFSR: when True, a lepton whose mother has the same pdgId (a
        post-FSR copy) is rejected instead of walking up the FSR chain.
    includeMotherless: verdict for leptons with no recorded mother.
    includeTauDecays: whether leptons from tau decays count as prompt.
    '''
    if abs(lepton.pdgId()) not in [11,13,15]:
        return False
    if lepton.numberOfMothers() == 0:
        return includeMotherless;
    mom = lepton.mother()
    if mom.pdgId() == lepton.pdgId():
        # Same-pdgId mother: *lepton* is an FSR copy; walk up the chain.
        if beforeFSR: return False
        return isPromptLepton(mom, beforeFSR, includeMotherless, includeTauDecays)
    elif abs(mom.pdgId()) == 15:
        # Lepton produced in a tau decay.
        if not includeTauDecays: return False
        return isPromptLepton(mom, beforeFSR, includeMotherless, includeTauDecays)
    else:
        # Prompt iff the mother is non-hadronic; SM-lepton mothers were
        # already handled by the branches above.
        return isNotHadronicId(mom.pdgId(), includeSMLeptons=False)
def isNotFromHadronicShower(l):
    '''Heuristic: True if *l* does not originate from a hadronic shower,
    judged from its mother's pdgId/status (recursing through SM-lepton
    mothers).  Particles with no mothers are accepted.

    Fixes vs. the original: ``xrange`` replaced with ``range`` (works on
    both Python 2 and 3) and the local that shadowed the builtin ``id``
    renamed.  Logic is unchanged.
    '''
    for x in range(l.numberOfMothers()):
        mom = l.mother(x)
        if mom.status() > 2: return True
        momId = abs(mom.pdgId())
        if momId > 1000000: return True   # BSM particle
        if momId > 100: return False      # hadron
        if momId < 6: return False        # quark
        if momId == 21: return False      # gluon
        if momId in [11,12,13,14,15,16]:
            # SM lepton mother: prompt if *l* itself is documented,
            # otherwise keep walking up the chain.
            if l.status() > 2: return True
            return isNotFromHadronicShower(mom)
        if momId >= 22 and momId <= 39: return True   # EW/Higgs boson
    return True
def realGenDaughters(gp, excludeRadiation=True):
    """Get the daughters of a particle, following radiative X -> X' + a
    decays, either including or excluding the radiation among the
    daughters, e.g. for
         X -> X' + a, X' -> b c
         realGenDaughters(X, excludeRadiation=True)  == [b, c]
         realGenDaughters(X, excludeRadiation=False) == [a, b, c]

    Fixes vs. the original: ``xrange`` replaced with ``range`` (Python
    2/3 compatible), and the recursive calls now propagate
    *excludeRadiation* -- previously the excludeRadiation=False path
    recursed with the default (True), silently dropping radiation
    emitted below the first level.
    """
    ret = []
    for i in range(gp.numberOfDaughters()):
        dau = gp.daughter(i)
        if dau.pdgId() == gp.pdgId():
            # Radiative self-decay X -> X' (+ a): follow X' downwards.
            if excludeRadiation:
                # Discard everything collected at this level (the
                # radiation) and return only the final X' daughters.
                return realGenDaughters(dau, excludeRadiation)
            else:
                ret += realGenDaughters(dau, excludeRadiation)
        else:
            ret.append(dau)
    return ret
def realGenMothers(gp):
    """Get the mothers of a particle X, walking through intermediate
    X -> X' copy chains: if Y -> X and X -> X', realGenMothers(X') == [Y].

    Fix: ``xrange`` replaced with ``range`` so the function also works
    on Python 3 (behaviour on Python 2 is unchanged).
    """
    ret = []
    for i in range(gp.numberOfMothers()):
        mom = gp.mother(i)
        if mom.pdgId() == gp.pdgId():
            # Same-pdgId mother is an earlier copy: keep walking up.
            ret += realGenMothers(mom)
        else:
            ret.append(mom)
    return ret
def lastGenCopy(gp):
    """True if *gp* is the last copy of itself in the decay chain, i.e.
    none of its daughters carries the same pdgId.

    Fix: ``xrange`` replaced with ``range`` (Python 2/3 compatible);
    stray semicolon removed.
    """
    me = gp.pdgId()
    for i in range(gp.numberOfDaughters()):
        if gp.daughter(i).pdgId() == me:
            return False
    return True
| 33.705882 | 86 | 0.605834 | from PhysicsTools.Heppy.physicsobjects.PhysicsObjects import printOut
from PhysicsTools.Heppy.physicsobjects.PhysicsObjects import GenParticle
def findStatus1Leptons(particle):
leptons = []
for i in range( particle.numberOfDaughters() ):
dau = particle.daughter(i)
if dau.status() == 1:
if abs(dau.pdgId())==11 or abs(dau.pdgId())==13:
leptons.append( dau )
else:
continue
else:
leptons = findStatus1Leptons( dau, leptons )
return leptons
def allDaughters(particle, daughters, rank ):
rank += 1
for i in range( particle.numberOfDaughters() ):
dau = GenParticle(particle.daughter(i))
dau.rank = rank
daughters.append( dau )
daughters = allDaughters( dau, daughters, rank )
return daughters
def bosonToX(particles, bosonType, xType):
bosons = filter(lambda x: x.status()==3 and x.pdgId()==bosonType, particles)
daughters = []
if len(bosons)==0:
return [], False
boson = bosons[0]
daus = []
allDaughters( boson, daus, 0)
xDaus = filter(lambda x: x.status()==3 and abs(x.pdgId())==xType, daus)
return xDaus, True
def isNotHadronicId(pdgId,includeSMLeptons=True):
if abs(pdgId) in [11,12,13,14,15,16]:
return includeSMLeptons
i = (abs(pdgId) % 1000)
return i > 10 and i != 21 and i < 100
def isPromptLepton(lepton, beforeFSR, includeMotherless=True, includeTauDecays=False):
if abs(lepton.pdgId()) not in [11,13,15]:
return False
if lepton.numberOfMothers() == 0:
return includeMotherless;
mom = lepton.mother()
if mom.pdgId() == lepton.pdgId():
if beforeFSR: return False
return isPromptLepton(mom, beforeFSR, includeMotherless, includeTauDecays)
elif abs(mom.pdgId()) == 15:
if not includeTauDecays: return False
return isPromptLepton(mom, beforeFSR, includeMotherless, includeTauDecays)
else:
return isNotHadronicId(mom.pdgId(), includeSMLeptons=False)
def isNotFromHadronicShower(l):
for x in xrange(l.numberOfMothers()):
mom = l.mother(x)
if mom.status() > 2: return True
id = abs(mom.pdgId())
if id > 1000000: return True
if id > 100: return False
if id < 6: return False
if id == 21: return False
if id in [11,12,13,14,15,16]:
if l.status() > 2: return True
return isNotFromHadronicShower(mom)
if id >= 22 and id <= 39: return True
return True
def realGenDaughters(gp,excludeRadiation=True):
ret = []
for i in xrange(gp.numberOfDaughters()):
dau = gp.daughter(i)
if dau.pdgId() == gp.pdgId():
if excludeRadiation:
return realGenDaughters(dau)
else:
ret += realGenDaughters(dau)
else:
ret.append(dau)
return ret
def realGenMothers(gp):
ret = []
for i in xrange(gp.numberOfMothers()):
mom = gp.mother(i)
if mom.pdgId() == gp.pdgId():
ret += realGenMothers(mom)
else:
ret.append(mom)
return ret
def lastGenCopy(gp):
me = gp.pdgId();
for i in xrange(gp.numberOfDaughters()):
if gp.daughter(i).pdgId() == me:
return False
return True
| true | true |
f726ebf3b8c2775c6822150273cdcd7cd4ffc96d | 2,878 | py | Python | factor_tools.py | ericgreveson/projecteuler | 1844bf383fca871b82d88ef1eb3a9b1a0e363054 | [
"Apache-2.0"
] | null | null | null | factor_tools.py | ericgreveson/projecteuler | 1844bf383fca871b82d88ef1eb3a9b1a0e363054 | [
"Apache-2.0"
] | null | null | null | factor_tools.py | ericgreveson/projecteuler | 1844bf383fca871b82d88ef1eb3a9b1a0e363054 | [
"Apache-2.0"
] | null | null | null | from fractions import Fraction
import math
def compute_factors(n):
"""
Return a list of all factors (proper divisors) of a number n, including the factor 1
"""
factors = [1]
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
factors.append(i)
factors.append(n // i)
return factors
def is_prime(n, prime_cache=None, prime_cache_max=None):
    """
    Return true if n is prime (n>1)
    If prime_cache is given, it should be a set of consecutive primes from 2 to prime_cache_max
    (and prime_cache_max must also be given).
    Then if n <= prime_cache_max, this test will use set lookup rather than factorization
    """
    # Optimizations to quickly reject known non-primes
    if n in [2, 3, 5, 7]:
        return True
    # Any other number not ending in 1, 3, 7 or 9 is divisible by 2 or 5;
    # n == 1 is excluded explicitly (1 is not prime).
    if (n % 10) not in [1, 3, 7, 9] or n == 1:
        return False
    if prime_cache and n <= prime_cache_max:
        return n in prime_cache
    # Fall back to trial division: a prime's only proper divisor is 1.
    return len(compute_factors(n)) == 1
def next_prime(previous):
    """
    Return the smallest prime strictly greater than *previous*.
    """
    candidate = previous + 1
    while not is_prime(candidate):
        candidate += 1
    return candidate
def prime_factors(n, primes=None):
    """
    Compute the prime factorisation of n with multiplicity,
    e.g. 12 -> [2, 2, 3].

    primes: optional ascending primes covering at least sqrt(n)
        (computed on demand when omitted).  If the supplied list is
        insufficient, the trailing remainder may be composite.

    Bug fixes vs. the original: the loop body referenced the undefined
    name ``current_prime`` (NameError on first call); a prime cofactor
    larger than sqrt(n) (e.g. the 5 in 10 = 2 * 5) was silently dropped;
    and get_primes() was called with sqrt(n) itself excluded, missing
    perfect-square factors such as 3 in 9.
    """
    if not primes:
        # +1 so that sqrt(n) itself is included (get_primes excludes up_to).
        primes = get_primes(int(math.sqrt(n)) + 1)
    factors = []
    remainder = n
    for prime in primes:
        # Divide by the current prime as many times as we can.
        while remainder % prime == 0:
            factors.append(prime)
            remainder //= prime
        # Bail out once fully factorized.
        if remainder == 1:
            break
    if remainder > 1:
        # Any cofactor left after dividing out all primes <= sqrt(n)
        # is itself prime.
        factors.append(remainder)
    return factors
def get_primes(up_to):
    """
    Return all primes strictly less than *up_to*, in ascending order.
    """
    primes = []
    candidate = 2
    while candidate < up_to:
        primes.append(candidate)
        candidate = next_prime(candidate)
    return primes
def totient(n, primes):
    """
    Euler's totient of n, computed as n * prod(1 - 1/p) over the distinct
    prime divisors p of n.

    primes: ascending primes covering at least all primes up to n.

    Returns an int when no listed prime divides n, otherwise a
    fractions.Fraction whose value is the (integer) totient.
    """
    result = n
    for prime in primes:
        if prime > n:
            break
        if n % prime != 0:
            continue
        result *= 1 - Fraction(1, prime)
    return result
def get_coprimes(n, primes):
    """
    Get the list of numbers in [0, n) coprime to n (requires n >= 2).

    primes: list of prime numbers up to at least sqrt(n) (forwarded to
        prime_factors).

    Bug fix: the original returned the surviving sieve *flags* (a list
    of True values) instead of the numbers themselves; the surviving
    indices are now returned.

    NOTE(review): 0 and 1 are both excluded, as in the original.  By the
    usual convention 1 *is* coprime to every n -- confirm whether its
    exclusion is intentional before relying on len(result) == totient(n).
    """
    factors = set(prime_factors(n, primes))
    # Sieve out every multiple (below n) of each prime factor.
    coprime = [True for i in range(n)]
    coprime[0] = False
    coprime[1] = False
    for factor in factors:
        for multiplier in range(1, n // factor):
            coprime[factor * multiplier] = False
    # Collect the surviving indices -- these are the coprime numbers.
    return [i for i, is_coprime in enumerate(coprime) if is_coprime]
| 25.927928 | 95 | 0.59729 | from fractions import Fraction
import math
def compute_factors(n):
factors = [1]
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
factors.append(i)
factors.append(n // i)
return factors
def is_prime(n, prime_cache=None, prime_cache_max=None):
if n in [2, 3, 5, 7]:
return True
if (n % 10) not in [1, 3, 7, 9] or n == 1:
return False
if prime_cache and n <= prime_cache_max:
return n in prime_cache
return len(compute_factors(n)) == 1
def next_prime(previous):
i = previous + 1
while True:
if is_prime(i):
return i
i += 1
def prime_factors(n, primes=None):
if not primes:
primes = get_primes(int(math.sqrt(n)))
factors = []
remainder = n
for prime in primes:
while remainder % current_prime == 0:
factors.append(current_prime)
remainder //= current_prime
if remainder == 1:
break
return factors
def get_primes(up_to):
primes = [2]
while primes[-1] < up_to:
primes.append(next_prime(primes[-1]))
return primes[:-1]
def totient(n, primes):
product = n
for p in primes:
if p > n:
break
if n % p == 0:
product *= (1 - Fraction(1, p))
return product
def get_coprimes(n, primes):
factors = set(prime_factors(n, primes))
# Now sieve out the factors
coprime = [True for i in range(n)]
coprime[0] = False
coprime[1] = False
for factor in factors:
for multiplier in range(1, n // factor):
coprime[factor * multiplier] = False
# And we have the coprimes!
return [c for c in coprime if c]
| true | true |
f726ebfcc0be524ce8e65eb0ea66ac8411693e2e | 1,175 | py | Python | course_grader/dao/__init__.py | uw-it-aca/gradepage | 7059d715cc112ad0ecb0e5012f716e525ee7b3bc | [
"Apache-2.0"
] | 1 | 2017-01-29T09:52:06.000Z | 2017-01-29T09:52:06.000Z | course_grader/dao/__init__.py | uw-it-aca/gradepage | 7059d715cc112ad0ecb0e5012f716e525ee7b3bc | [
"Apache-2.0"
] | 287 | 2017-03-09T00:17:20.000Z | 2022-01-08T00:36:34.000Z | course_grader/dao/__init__.py | uw-it-aca/gradepage | 7059d715cc112ad0ecb0e5012f716e525ee7b3bc | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from django.utils.timezone import (
get_default_timezone, localtime, is_naive, make_aware)
from datetime import datetime
from uw_sws import SWS_DAO, sws_now
from abc import ABC, abstractmethod
def __update_get(self, url, response):
    """No-op replacement for SWS_DAO._update_get (installed below)."""
    pass
# Replace the SWS _update_get method to prevent tampering with mocked resources
SWS_DAO._update_get = __update_get
def current_datetime():
    """Return the application's notion of "now".

    Honors the CURRENT_DATETIME_OVERRIDE setting (a naive
    "%Y-%m-%d %H:%M:%S" string, useful for tests); otherwise delegates
    to sws_now().
    """
    override = getattr(settings, "CURRENT_DATETIME_OVERRIDE", None)
    if override is None:
        return sws_now()
    return datetime.strptime(override, "%Y-%m-%d %H:%M:%S")
def display_datetime(dt):
    """Format *dt* for display, e.g. "March 05 at  2:30 PM PST".

    Naive datetimes are interpreted in the default timezone; aware ones
    are converted to local time.  NOTE: %l (blank-padded 12-hour clock)
    is a glibc strftime extension -- confirm on non-Linux platforms.
    """
    localized = (make_aware(dt, get_default_timezone()) if is_naive(dt)
                 else localtime(dt))
    return localized.strftime("%B %d at %l:%M %p %Z")
class GradeImportSource(ABC):
    """Abstract base for grade-import backends.

    Subclasses implement grades_for_section(); is_true() is a shared
    helper for interpreting truthy strings from source data.
    """

    # String values (compared case-insensitively) that count as True.
    true_values = ["1", "y", "yes", "true"]

    @abstractmethod
    def grades_for_section(self, section, instructor, **kwargs):
        """Return the grades for *section* as seen by *instructor*."""
        pass

    def is_true(self, val):
        """Return True if *val* is a truthy string (None is falsy)."""
        if val is None:
            return False
        return val.lower() in self.true_values
from django.conf import settings
from django.utils.timezone import (
get_default_timezone, localtime, is_naive, make_aware)
from datetime import datetime
from uw_sws import SWS_DAO, sws_now
from abc import ABC, abstractmethod
def __update_get(self, url, response):
pass
SWS_DAO._update_get = __update_get
def current_datetime():
override_dt = getattr(settings, "CURRENT_DATETIME_OVERRIDE", None)
if override_dt is not None:
return datetime.strptime(override_dt, "%Y-%m-%d %H:%M:%S")
else:
return sws_now()
def display_datetime(dt):
if is_naive(dt):
dt = make_aware(dt, get_default_timezone())
else:
dt = localtime(dt)
return dt.strftime("%B %d at %l:%M %p %Z")
class GradeImportSource(ABC):
true_values = ["1", "y", "yes", "true"]
@abstractmethod
def grades_for_section(self, section, instructor, **kwargs):
pass
def is_true(self, val):
return (val is not None and val.lower() in self.true_values)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.