hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c21f53d13555d6bd3885638304b9c9bab06c94e | 1,515 | py | Python | tests/unit/bundlers/test_base.py | jochenvdv/snakepack | f27c5953c5f058a82b8cce55b23cdf32cc1f6602 | [
"MIT"
] | 1 | 2022-01-27T07:15:27.000Z | 2022-01-27T07:15:27.000Z | tests/unit/bundlers/test_base.py | jochenvdv/snakepack | f27c5953c5f058a82b8cce55b23cdf32cc1f6602 | [
"MIT"
] | null | null | null | tests/unit/bundlers/test_base.py | jochenvdv/snakepack | f27c5953c5f058a82b8cce55b23cdf32cc1f6602 | [
"MIT"
] | null | null | null | from snakepack.assets import Asset
from snakepack.bundlers import Bundle
from snakepack.bundlers._base import Bundler
from snakepack.config.model import GlobalOptions
from snakepack.loaders import Loader
from snakepack.transformers import Transformer
class BundleTest:
    """Unit tests for snakepack.bundlers.Bundle."""

    def test_init(self, mocker):
        """A Bundle exposes the name/bundler/loader/transformers it was built with."""
        bundler = mocker.MagicMock(spec=Bundler)
        loader = mocker.MagicMock(spec=Loader)
        transformers = [
            mocker.MagicMock(spec=Transformer),
            mocker.MagicMock(spec=Transformer)
        ]
        bundle = Bundle(name='bundle1', bundler=bundler, loader=loader, transformers=transformers)
        assert bundle.name == 'bundle1'
        assert bundle.bundler is bundler
        assert bundle.loader == loader
        assert bundle.transformers == transformers

    def test_bundle(self, mocker):
        """Bundle.bundle() delegates to its bundler exactly once, passing itself."""
        bundler = mocker.MagicMock(spec=Bundler)
        # Fix: the loader mock previously used spec=Bundler, which would have
        # allowed loader-specific attribute typos to pass silently; use
        # spec=Loader for consistency with test_init. Also dropped the unused
        # `assets` local.
        loader = mocker.MagicMock(spec=Loader)
        transformers = []
        bundle = Bundle(name='bundle1', bundler=bundler, loader=loader, transformers=transformers)
        bundle.bundle()
        bundler.bundle.assert_called_once_with(bundle)
class BundlerTest:
    """Unit tests for the abstract snakepack Bundler base class."""
    class TestBundler(Bundler):
        # Minimal concrete subclass so the abstract base can be instantiated.
        def bundle(self, bundle: Bundle):
            pass
    def test_init(self, mocker):
        # A concrete Bundler subclass can be constructed with global options
        # and its bundle() hook invoked without error.
        global_options = mocker.MagicMock(spec=GlobalOptions)
        bundle = mocker.MagicMock(spec=Bundle)
        bundler = self.TestBundler(global_options=global_options)
        bundler.bundle(bundle)
| 30.918367 | 98 | 0.690429 |
98f8964247ecababc9d429abd987800c830015b0 | 1,377 | py | Python | quietpaper/display.py | benjaminsoellner/quietpaper | f4a69521deb5ad38907279964146b9dd7a17ba70 | [
"MIT"
] | 16 | 2019-12-04T07:46:58.000Z | 2021-11-24T14:59:55.000Z | quietpaper/display.py | benjaminsoellner/quietpaper | f4a69521deb5ad38907279964146b9dd7a17ba70 | [
"MIT"
] | 1 | 2020-01-18T09:18:49.000Z | 2020-01-18T10:42:56.000Z | quietpaper/display.py | benjaminsoellner/quietpaper | f4a69521deb5ad38907279964146b9dd7a17ba70 | [
"MIT"
] | null | null | null | from PIL import Image, ImageDraw, ImageFont
class Display:
    """Off-screen drawing surface for a two-color (black/red) e-paper panel.

    Maintains two 1-bit PIL images of the same size — one per ink color —
    where 255 is blank/white and 0 is drawn. Callers pick the layer with
    the `is_red` flag on each drawing primitive.
    """

    def __init__(self, width, height):
        self.width = width
        self.height = height
        # 1-bit ('1' mode) images, initialized fully white (255).
        self.black_image = Image.new('1', (self.width, self.height), 255)
        self.red_image = Image.new('1', (self.width, self.height), 255)
        # url -> PIL.Image cache so each bitmap file is opened only once.
        self.bmp_cache = {}
        self.black_canvas = ImageDraw.Draw(self.black_image)
        self.red_canvas = ImageDraw.Draw(self.red_image)
        self.font = ImageFont.truetype('/usr/share/fonts/truetype/wqy/wqy-microhei.ttc', 18)

    def text(self, x, y, text, is_red=False, font=None):
        """Draw *text* at (x, y) on the chosen layer, using the default font unless given one."""
        canvas = self.red_canvas if is_red else self.black_canvas
        canvas.text((x, y), text, font=self.font if font is None else font, fill=0)

    def bmp(self, x, y, url, is_red=False):
        """Paste the bitmap at *url* onto the chosen layer, caching the loaded image.

        Bug fix: the previous `self.bmp_cache.get(url, Image.open(url))`
        evaluated `Image.open(url)` on *every* call (dict.get's default
        argument is eager), re-opening the file even on cache hits.
        """
        bmp = self.bmp_cache.get(url)
        if bmp is None:
            bmp = Image.open(url)
            self.bmp_cache[url] = bmp
        image = self.red_image if is_red else self.black_image
        image.paste(bmp, (x, y))

    def line(self, x1, y1, x2, y2, is_red=False):
        """Draw a line from (x1, y1) to (x2, y2) on the chosen layer."""
        canvas = self.red_canvas if is_red else self.black_canvas
        canvas.line((x1, y1, x2, y2), fill=0)

    def erase(self, x1, y1, x2, y2):
        """Blank (fill white) the given rectangle on both layers."""
        self.black_canvas.rectangle((x1, y1, x2, y2), fill=255)
        self.red_canvas.rectangle((x1, y1, x2, y2), fill=255)

    def clear(self):
        """Blank the entire display on both layers."""
        self.erase(0, 0, self.width, self.height)
8301ffdf25c318f098430bca94a8aef5dd8b2018 | 1,195 | py | Python | inspection_sdk/client.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | inspection_sdk/client.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | inspection_sdk/client.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import inspection_sdk.api.collector.collector_client
import inspection_sdk.api.history.history_client
import inspection_sdk.api.info.info_client
import inspection_sdk.api.metric_group.metric_group_client
import inspection_sdk.api.task.task_client
import inspection_sdk.api.template.template_client
class Client(object):
    """Facade aggregating the per-service clients of the inspection SDK."""

    def __init__(self, server_ip="", server_port=0, service_name=""):
        # Every sub-client is constructed against the same endpoint triple.
        endpoint = (server_ip, server_port, service_name)
        api = inspection_sdk.api
        self.collector = api.collector.collector_client.CollectorClient(*endpoint)
        self.history = api.history.history_client.HistoryClient(*endpoint)
        self.info = api.info.info_client.InfoClient(*endpoint)
        self.metric_group = api.metric_group.metric_group_client.MetricGroupClient(*endpoint)
        self.task = api.task.task_client.TaskClient(*endpoint)
        self.template = api.template.template_client.TemplateClient(*endpoint)
53359d1dd6c59afde2605cd95d34ed04672aa2fb | 1,740 | py | Python | localstack/services/kinesis/kinesis_starter.py | cknave/localstack | 67941331c74dded97284698aba64984ab69cdf43 | [
"Apache-2.0"
] | 1 | 2021-02-19T19:28:30.000Z | 2021-02-19T19:28:30.000Z | localstack/services/kinesis/kinesis_starter.py | cknave/localstack | 67941331c74dded97284698aba64984ab69cdf43 | [
"Apache-2.0"
] | null | null | null | localstack/services/kinesis/kinesis_starter.py | cknave/localstack | 67941331c74dded97284698aba64984ab69cdf43 | [
"Apache-2.0"
] | 1 | 2021-01-10T03:21:47.000Z | 2021-01-10T03:21:47.000Z | import logging
import traceback
from localstack import config
from localstack.utils.aws import aws_stack
from localstack.utils.common import mkdir, get_free_tcp_port, edge_ports_info
from localstack.services import install
from localstack.services.infra import start_proxy_for_service, do_run
from localstack.services.install import ROOT_PATH
LOGGER = logging.getLogger(__name__)
def start_kinesis(port=None, asynchronous=False, update_listener=None):
    """Start the mock Kinesis backend (kinesalite) and proxy it behind `port`.

    Installs kinesalite if needed, launches it on a free TCP port with the
    configured shard limit / latency, and wires a proxy from the public
    Kinesis port to that backend. Returns the handle from `do_run` (a thread
    when `asynchronous=True`).
    """
    port = port or config.PORT_KINESIS
    install.install_kinesalite()
    # The actual kinesalite process listens on a private free port; the
    # public `port` is served by the proxy started below.
    backend_port = get_free_tcp_port()
    latency = config.KINESIS_LATENCY
    kinesis_data_dir_param = ''
    if config.DATA_DIR:
        # Persist stream data under DATA_DIR so state survives restarts.
        kinesis_data_dir = '%s/kinesis' % config.DATA_DIR
        mkdir(kinesis_data_dir)
        kinesis_data_dir_param = '--path %s' % kinesis_data_dir
    cmd = (
        '%s/node_modules/kinesalite/cli.js --shardLimit %s --port %s'
        ' --createStreamMs %s --deleteStreamMs %s --updateStreamMs %s %s'
    ) % (
        ROOT_PATH, config.KINESIS_SHARD_LIMIT, backend_port,
        latency, latency, latency, kinesis_data_dir_param
    )
    print('Starting mock Kinesis service on %s ...' % edge_ports_info())
    start_proxy_for_service('kinesis', port, backend_port, update_listener)
    return do_run(cmd, asynchronous)
def check_kinesis(expect_shutdown=False, print_error=False):
    """Health check for the mock Kinesis service.

    Asserts that a ListStreams call succeeds — or, when
    `expect_shutdown=True`, that the service is no longer reachable.
    """
    streams = None
    try:
        client = aws_stack.connect_to_service(service_name='kinesis')
        streams = client.list_streams()
    except Exception as e:
        if print_error:
            LOGGER.error('Kinesis health check failed: %s %s' % (e, traceback.format_exc()))
    if expect_shutdown:
        assert streams is None
    else:
        assert isinstance(streams['StreamNames'], list)
ad9f2cbdc47a72b4a168aa32d56170d5232752ec | 3,016 | py | Python | venv/Lib/site-packages/pandas/tests/indexing/test_check_indexer.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | 1 | 2021-02-06T21:00:00.000Z | 2021-02-06T21:00:00.000Z | venv/Lib/site-packages/pandas/tests/indexing/test_check_indexer.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/indexing/test_check_indexer.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.api.indexers import check_array_indexer
@pytest.mark.parametrize(
    "indexer, expected",
    [
        # integer
        ([1, 2], np.array([1, 2], dtype=np.intp)),
        (np.array([1, 2], dtype="int64"), np.array([1, 2], dtype=np.intp)),
        (pd.array([1, 2], dtype="Int32"), np.array([1, 2], dtype=np.intp)),
        (pd.Index([1, 2]), np.array([1, 2], dtype=np.intp)),
        # boolean
        ([True, False, True], np.array([True, False, True], dtype=np.bool_)),
        (np.array([True, False, True]), np.array([True, False, True], dtype=np.bool_)),
        (
            pd.array([True, False, True], dtype="boolean"),
            np.array([True, False, True], dtype=np.bool_),
        ),
        # other
        ([], np.array([], dtype=np.intp)),
    ],
)
def test_valid_input(indexer, expected):
    """Valid integer/boolean/empty indexers normalize to plain numpy arrays."""
    array = np.array([1, 2, 3])
    result = check_array_indexer(array, indexer)
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
    "indexer", [[True, False, None], pd.array([True, False, None], dtype="boolean")],
)
def test_boolean_na_returns_indexer(indexer):
    """Missing values in a boolean mask are treated as False (GH#31503)."""
    # https://github.com/pandas-dev/pandas/issues/31503
    arr = np.array([1, 2, 3])
    result = check_array_indexer(arr, indexer)
    expected = np.array([True, False, False], dtype=bool)
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
    "indexer",
    [
        [True, False],
        pd.array([True, False], dtype="boolean"),
        np.array([True, False], dtype=np.bool_),
    ],
)
def test_bool_raise_length(indexer):
    """A boolean mask whose length differs from the array raises IndexError."""
    array = np.array([1, 2, 3])
    msg = "Boolean index has wrong length"
    with pytest.raises(IndexError, match=msg):
        check_array_indexer(array, indexer)
@pytest.mark.parametrize(
    "indexer", [[0, 1, None], pd.array([0, 1, pd.NA], dtype="Int64")],
)
def test_int_raise_missing_values(indexer):
    """Integer indexers containing NA values are rejected with ValueError."""
    array = np.array([1, 2, 3])
    msg = "Cannot index with an integer indexer containing NA values"
    with pytest.raises(ValueError, match=msg):
        check_array_indexer(array, indexer)
@pytest.mark.parametrize(
    "indexer",
    [
        [0.0, 1.0],
        np.array([1.0, 2.0], dtype="float64"),
        np.array([True, False], dtype=object),
        pd.Index([True, False], dtype=object),
        pd.array(["a", "b"], dtype="string"),
    ],
)
def test_raise_invalid_array_dtypes(indexer):
    """Float, object and string array indexers raise IndexError."""
    array = np.array([1, 2, 3])
    msg = "arrays used as indices must be of integer or boolean type"
    with pytest.raises(IndexError, match=msg):
        check_array_indexer(array, indexer)
@pytest.mark.parametrize(
    "indexer", [None, Ellipsis, slice(0, 3), (None,)],
)
def test_pass_through_non_array_likes(indexer):
    """Non-array-like indexers (None, Ellipsis, slices, tuples) pass through unchanged."""
    array = np.array([1, 2, 3])
    result = check_array_indexer(array, indexer)
    assert result == indexer
| 30.16 | 88 | 0.599801 |
27770962c8a4a0cfe47057fd651e0a922e64b16c | 9,737 | py | Python | rllib/utils/framework.py | aniryou/ray | c1a97c8c0420dc9b77fda536120741f39d2a8fd1 | [
"Apache-2.0"
] | null | null | null | rllib/utils/framework.py | aniryou/ray | c1a97c8c0420dc9b77fda536120741f39d2a8fd1 | [
"Apache-2.0"
] | null | null | null | rllib/utils/framework.py | aniryou/ray | c1a97c8c0420dc9b77fda536120741f39d2a8fd1 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import sys
from typing import Any, Union
from ray.util import log_once
# Module-level logger for the framework helpers below.
logger = logging.getLogger(__name__)
# Represents a generic tensor type (framework-agnostic, hence `Any`).
TensorType = Any
# Either a plain tensor, or a dict or tuple of tensors (or StructTensors).
TensorStructType = Union[TensorType, dict, tuple]
def get_auto_framework():
    """Infers the framework to use when the config says framework="auto".

    Resolves to "torch" when only PyTorch is installed and to "tf" when only
    TensorFlow is installed; raises a ValueError when both or neither are
    available.
    """
    has_torch = torch is not None
    has_tf = bool(tf)
    if has_torch and has_tf:
        # Ambiguous situation: the user must choose explicitly.
        raise ValueError(
            "framework='auto' (default value) is not allowed if both "
            "TensorFlow AND PyTorch are installed! "
            "Instead, use framework='tf|tfe|torch' explicitly.")
    if has_torch:
        if log_once("get_auto_framework"):
            logger.info(
                "`framework=auto` found in config -> Detected PyTorch.")
        return "torch"
    if not has_tf:
        raise ValueError(
            "Neither TensorFlow nor PyTorch are installed! You must install "
            "one of them by running either `pip install tensorflow` OR "
            "`pip install torch torchvision`")
    if log_once("get_auto_framework"):
        logger.info("`framework=auto` found in config -> Detected TensorFlow.")
    return "tf"
def check_framework(framework, allow_none=True):
    """Validates a framework string and resolves "auto".

    Args:
        framework (str): One of "tf", "tfe", "torch", "auto", or None.
        allow_none (bool): Whether framework=None (e.g. a numpy
            implementation) is permitted.

    Returns:
        str: The resolved framework string.

    Raises:
        ImportError: If the requested framework is not installed.
        ValueError: If the framework string is invalid, or None is given
            but not allowed.
    """
    if framework == "auto":
        framework = get_auto_framework()
    if framework in ("tf", "tfe") and tf is None:
        raise ImportError(
            "Could not import `tensorflow`. Try `pip install tensorflow`")
    if framework == "torch" and torch is None:
        raise ImportError("Could not import `torch`. "
                          "Try `pip install torch torchvision`")
    if framework is None and not allow_none:
        raise ValueError("framework=None not allowed!")
    if framework not in ("tf", "tfe", "torch", None):
        raise ValueError("Invalid framework='{}'. Use one of "
                         "[tf|tfe|torch|auto].".format(framework))
    return framework
def try_import_tf(error=False):
    """Tries importing tf and returns the module (or None).

    Args:
        error (bool): Whether to raise an error if tf cannot be imported.
    Returns:
        The tf module (either tf2.x's `tf.compat.v1` or plain tf1.x),
        or None if TensorFlow is unavailable or disabled for tests.
    Raises:
        ImportError: If error=True and tf is not installed.
    """
    # Make sure, these are reset after each test case
    # that uses them: del os.environ["RLLIB_TEST_NO_TF_IMPORT"]
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow for test purposes")
        return None
    # Silence TF's C++ logging unless the user configured it explicitly.
    if "TF_CPP_MIN_LOG_LEVEL" not in os.environ:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    # Try to reuse already imported tf module. This will avoid going through
    # the initial import steps below and thereby switching off v2_behavior
    # (switching off v2 behavior twice breaks all-framework tests for eager).
    if "tensorflow" in sys.modules:
        tf_module = sys.modules["tensorflow"]
        # Try "reducing" tf to tf.compat.v1.
        try:
            tf_module = tf_module.compat.v1
        # No compat.v1 -> return tf as is.
        except AttributeError:
            pass
        return tf_module
    # Just in case. We should not go through the below twice.
    assert "tensorflow" not in sys.modules
    try:
        # Try "reducing" tf to tf.compat.v1.
        import tensorflow.compat.v1 as tf
        tf.logging.set_verbosity(tf.logging.ERROR)
        # Disable v2 eager mode.
        tf.disable_v2_behavior()
        return tf
    except ImportError:
        # tf1.x has no tensorflow.compat.v1 module -> import it directly.
        try:
            import tensorflow as tf
            return tf
        except ImportError as e:
            if error:
                raise e
            return None
def tf_function(tf_module):
    """Conditional decorator for @tf.function.
    Use @tf_function(tf) instead to avoid errors if tf is not installed."""
    def decorator(func):
        # When tf is missing or running eagerly, leave the function as-is;
        # otherwise wrap it with tf.function for graph-mode tracing.
        eager_or_missing = tf_module is None or tf_module.executing_eagerly()
        return func if eager_or_missing else tf_module.function(func)
    return decorator
def try_import_tfp(error=False):
    """Imports tensorflow_probability, if available.

    Args:
        error (bool): If True, re-raise the ImportError on failure instead
            of returning None.

    Returns:
        The tfp module, or None when it is unavailable or importing is
        disabled for tests via RLLIB_TEST_NO_TF_IMPORT.

    Raises:
        ImportError: If error=True and tfp is not installed.
    """
    if os.environ.get("RLLIB_TEST_NO_TF_IMPORT") is not None:
        logger.warning("Not importing TensorFlow Probability for test "
                       "purposes.")
        return None
    try:
        import tensorflow_probability as tfp
    except ImportError as e:
        if error:
            raise e
        return None
    return tfp
# Fake module standing in for torch.nn when torch is unavailable.
class NNStub:
    """Stub torch.nn replacement returned by _torch_stubs()."""

    def __init__(self, *args, **kwargs):
        # Stub for the torch.nn.functional submodule.
        self.functional = None
        # Inheriting from / instantiating this raises a helpful ImportError.
        self.Module = ModuleStub
# Fake class for torch.nn.Module to allow it to be inherited from.
class ModuleStub:
    """Stand-in for torch.nn.Module; any instantiation fails loudly."""

    def __init__(self, *args, **kwargs):
        raise ImportError("Could not import `torch`.")
def try_import_torch(error=False):
    """Imports torch and torch.nn, falling back to stubs when unavailable.

    Args:
        error (bool): Whether to raise an error if torch cannot be imported.

    Returns:
        tuple: (torch module or None, torch.nn module or NNStub).

    Raises:
        ImportError: If error=True and PyTorch is not installed.
    """
    if os.environ.get("RLLIB_TEST_NO_TORCH_IMPORT") is not None:
        logger.warning("Not importing PyTorch for test purposes.")
        return _torch_stubs()
    try:
        import torch
        import torch.nn as nn
    except ImportError as e:
        if error:
            raise e
        return _torch_stubs()
    return torch, nn
def _torch_stubs():
    """Returns (None, stub-nn), mirroring try_import_torch's return shape."""
    return None, NNStub()
def get_variable(value,
                 framework="tf",
                 trainable=False,
                 tf_name="unnamed-variable",
                 torch_tensor=False,
                 device=None):
    """Creates a framework-specific variable from an initial value.

    Args:
        value (any): The initial value to use. In the non-tf case, this
            will be returned as is.
        framework (str): One of "tf", "torch", or None.
        trainable (bool): Whether the generated variable should be
            trainable (tf)/require_grad (torch) or not (default: False).
        tf_name (str): For framework="tf": An optional name for the
            tf.Variable.
        torch_tensor (bool): For framework="torch": Whether to actually
            create a torch.tensor, or just a python value (default).
        device: For framework="torch": optional device to move the tensor to.

    Returns:
        any: A framework-specific variable (tf.Variable, torch.tensor, or
        python primitive).
    """
    if framework == "tf":
        import tensorflow as tf
        # Infer the dtype from the value when it has no `dtype` attribute.
        if hasattr(value, "dtype"):
            dtype = value.dtype
        elif isinstance(value, float):
            dtype = tf.float32
        elif isinstance(value, int):
            dtype = tf.int32
        else:
            dtype = None
        return tf.compat.v1.get_variable(
            tf_name, initializer=value, dtype=dtype, trainable=trainable)
    if framework == "torch" and torch_tensor is True:
        torch, _ = try_import_torch()
        tensor = torch.from_numpy(value)
        if device:
            tensor = tensor.to(device)
        tensor.requires_grad = trainable
        return tensor
    # torch (without torch_tensor) or None: return the python primitive.
    return value
def get_activation_fn(name, framework="tf"):
    """Returns a framework-specific activation function for a name string.

    Args:
        name (str): One of "relu" (default), "tanh", or "linear".
        framework (str): One of "tf" or "torch".

    Returns:
        A framework-specific activation function, e.g. tf.nn.tanh or
        torch.nn.ReLU. None if name is in ["linear", None].

    Raises:
        ValueError: If name is an unknown activation function.
    """
    # "linear"/None means the identity -> no activation layer at all.
    if name in ("linear", None):
        return None
    if framework == "torch":
        _, nn = try_import_torch()
        if name == "relu":
            return nn.ReLU
        if name == "tanh":
            return nn.Tanh
    else:
        tf = try_import_tf()
        fn = getattr(tf.nn, name, None)
        if fn is not None:
            return fn
    raise ValueError("Unknown activation ({}) for framework={}!".format(
        name, framework))
# Module-level singletons: resolved once at import time and reused by the
# helpers above (None when the respective framework is unavailable).
# This call should never happen inside a module's functions/classes
# as it would re-disable tf-eager.
tf = try_import_tf()
torch, _ = try_import_torch()
| 31.308682 | 79 | 0.615487 |
1392eac286e5a86ea38c0af0347ceb23609c7964 | 195 | py | Python | PyOpdb/NCBI_API/__init__.py | GodInLove/OPDB | d5d9c9ce5239037dcc57abba6377abbfccec32d1 | [
"Apache-2.0"
] | 1 | 2017-09-24T15:59:31.000Z | 2017-09-24T15:59:31.000Z | PyOpdb/NCBI_API/__init__.py | GodInLove/OPDB | d5d9c9ce5239037dcc57abba6377abbfccec32d1 | [
"Apache-2.0"
] | null | null | null | PyOpdb/NCBI_API/__init__.py | GodInLove/OPDB | d5d9c9ce5239037dcc57abba6377abbfccec32d1 | [
"Apache-2.0"
] | null | null | null | __author__ = "yd.liu"
from . import SRA_API
from . import Genome_API
from .url import connect_url
from .string import findall_pat
from .url import callback
from .string import paired_or_single
| 19.5 | 36 | 0.8 |
251deb18a5afe41f41941ba9fb991f2b16900006 | 1,343 | py | Python | main.py | marjamis/GameOff-2021-Bug | c9b583ef15a31c566fd129cb551cca3852dc6545 | [
"MIT"
] | null | null | null | main.py | marjamis/GameOff-2021-Bug | c9b583ef15a31c566fd129cb551cca3852dc6545 | [
"MIT"
] | 1 | 2022-02-14T06:57:36.000Z | 2022-02-14T06:57:36.000Z | main.py | marjamis/GameOff-2021-Bug | c9b583ef15a31c566fd129cb551cca3852dc6545 | [
"MIT"
] | null | null | null | import pygame
# Internal dependencies
from engine.screen import Screen
from engine.events import Events
class Game:
    """Top-level game object: owns the pygame window, screen, clock, and event loop."""
    def __init__(self):
        pygame.init()
        pygame.display.set_caption("Project Plant")
        pygame.display.set_icon(pygame.image.load("./media/images/active/plant.png"))
        # Background music is currently disabled.
        # pygame.mixer.music.load('./media/music/background.wav')
        # pygame.mixer.music.play(-1)
        self.clock = pygame.time.Clock()
        self.screen = Screen()
        self.events = Events(self.screen.plant)
    def process_input(self):
        # Checks for any events that need to be actioned, such as the keyboard/mouse
        self.events.check()
    def update(self):
        """Updates the games state. Currently this is done elsewhere but should it be here?"""
        pass
    def render(self):
        # Updates the screen for changes
        self.screen.update()
        # Make the most recently drawn screen visible
        pygame.display.flip()
    def run(self):
        # Cap the loop at 60 frames per second.
        self.clock.tick(60)
    def start(self):
        """This starts the game loop which will perform all the operations to make the game a game."""
        while True:
            self.process_input()
            self.update()
            self.render()
            self.run()
if __name__ == '__main__':
    # Script entry point: construct the game and enter its main loop.
    game = Game()
    game.start()
| 27.979167 | 102 | 0.622487 |
307d4f6945eb3805d2ac7783210c3d3b21a1a634 | 10,408 | py | Python | AOTemporalAnalysis/Dynamic_Densitometry/Dynamic_Densitometry_Processing_Pipeline/Temporal_Dataset_Preprocessing_Pipeline.py | DavidBrainard/AdaptiveOpticsSoftware | 0779ea5d06991c0c52f450034f4c7914e43257ff | [
"MIT"
] | 2 | 2016-10-10T16:55:39.000Z | 2017-07-31T12:30:11.000Z | AOTemporalAnalysis/Dynamic_Densitometry/Dynamic_Densitometry_Processing_Pipeline/Temporal_Dataset_Preprocessing_Pipeline.py | DavidBrainard/AdaptiveOpticsSoftware | 0779ea5d06991c0c52f450034f4c7914e43257ff | [
"MIT"
] | null | null | null | AOTemporalAnalysis/Dynamic_Densitometry/Dynamic_Densitometry_Processing_Pipeline/Temporal_Dataset_Preprocessing_Pipeline.py | DavidBrainard/AdaptiveOpticsSoftware | 0779ea5d06991c0c52f450034f4c7914e43257ff | [
"MIT"
] | 2 | 2015-12-16T06:39:50.000Z | 2019-05-20T16:09:43.000Z | # Robert Cooper
# 9-15-2017
# This script removes residual distortion from a strip-registered dataset.
# It requires:
# * A *functioning* MATLAB runtime, that has been set up to link to Python (optional).
# * The .dmp file output from Alfredo Dubra's Demotion software suite. **I realize this makes it VERY specific-
# I do not promise any amazing things happening as result of using this software!**
# * The 'mat' file corresponding to the grid calibration- also using Alf Dubra's script.
# * The dataset you wish to put through the temporal analysis pipeline.
#
#
# Bootstrap: the MATLAB engine must be importable. If it is not, prompt the
# user for the MATLAB python-engine folder and run its setup.py with elevated
# rights to link MATLAB into this Python install. NOTE(review): this path is
# Python 2 / Windows only (Tkinter, unicode(), hard-coded C:\Python27).
try:
    import matlab.engine # This needs to be imported first for some stupid reason.
except:
    import Tkinter as tk
    import Tkconstants, tkFileDialog, tkMessageBox
    import os, sys, ctypes
    import subprocess
    import socket
    options = {}
    options['title'] = 'Please select your [MATLABROOT]\extern\engines\python folder to link to MATLAB.'
    matlab_folder_path = tkFileDialog.askdirectory(**options)
    # Run "python setup.py install" as administrator inside the selected folder.
    ctypes.windll.shell32.ShellExecuteW(None, u"runas", unicode("C:\\Python27\\python.exe"), u"setup.py install", unicode(matlab_folder_path), 1)
    try:
        import matlab.engine
    except:
        tkMessageBox.showerror("Linking (should be) successful!", "If the console did not display any errors, then linking successful! Please restart this script.")
        sys.exit(0)
import os, pickle
import Tkinter as tk
import Tkconstants, tkFileDialog, tkMessageBox
import numpy as np
# Hidden-ish Tk root used only as the parent for the file dialogs below.
root = tk.Tk()
try:
    mat_engi = matlab.engine.start_matlab()
except:
    tkMessageBox.showerror("Unable to start MATLAB! Ensure you have a valid copy of MATLAB installed AND it has been linked with python.")
    quit(1)
# Ask for the desinusoid (grid calibration) .mat file and precompute the
# static distortion correction from it via MATLAB.
options = {}
options['title'] = 'Select the DESINUSOID FILE:'
options['parent'] = root
options['filetypes'] = [("MAT File", ".mat")]
desinsoid_file = tkFileDialog.askopenfilename(**options)
static_distortion = mat_engi.Static_Distortion_Repair(desinsoid_file)
just_the_dir = os.path.split(desinsoid_file)[0]
# Folder containing the Demotion .dmp registration dumps.
options = {}
options['title'] = 'Select the folder containing the DMP files:'
options['parent'] = root
options['initialdir'] = just_the_dir
dmp_folder_path = tkFileDialog.askdirectory(**options)
# Folder containing the image/movie files the dmps refer to.
options = {}
options['title'] = 'Select the folder containing the IMAGE or MOVIE files:'
options['parent'] = root
options['initialdir'] = dmp_folder_path
image_folder_path = tkFileDialog.askdirectory(**options)
# Stimulus window, in frame indices. NOTE(review): hard-coded for this
# protocol — confirm against the acquisition settings.
stimend = 38
stimbegin = 3
# progo = ttk.Progressbar(root, length=len(os.listdir(dmp_folder_path)))
# progo.pack()
# Main pass: for every .dmp in the folder, extract the per-strip eye motion,
# find the matching image/movie files, and hand them to MATLAB for
# distortion repair and frame culling.
fixed_images = []
THEpath = ""
for thisfile in os.listdir(dmp_folder_path):
    if thisfile.endswith(".dmp"):
        try:
            pickle_path = os.path.join(dmp_folder_path, thisfile)
            # Fix the fact that it was done originally in Windows: rewrite
            # CRLF line endings so the pickle loads on this platform.
            pickle_file = open(pickle_path, 'rb')
            text = pickle_file.read().replace('\r\n', '\n')
            pickle_file.close()
            pickle_file = open(pickle_path, 'wb')
            pickle_file.write(text)
            pickle_file.close()
            pickle_file = open(pickle_path, 'r')
            pick = pickle.load(pickle_file)
            # Full-frame NCC shifts plus per-strip interval data from Demotion.
            ff_translation_info_rowshift = pick['full_frame_ncc']['row_shifts']
            ff_translation_info_colshift = pick['full_frame_ncc']['column_shifts']
            strip_translation_info = pick['sequence_interval_data_list']
            firsttime = True
            pickle_file.close()
            # Find the dmp's matching image(s).
            modalities = ('confocal', 'split_det', 'avg', 'visible')
            images_to_fix =[]
            # Find all images in our folder that this dmp applies to: swap the
            # dmp's modality token for each known modality and look for files.
            for thismode in modalities:
                if thismode in thisfile:
                    #print "Found the modality of this dmp file! It is: "+thismode
                    for mode in modalities:
                        checkfile = thisfile[0:-4].replace(thismode, mode)
                        for imagefile in os.listdir(image_folder_path):
                            if (checkfile in imagefile) and (imagefile.endswith(".tif") or imagefile.endswith(".avi")):
                                # print("Whoa! " + imagefile + " matched!")
                                images_to_fix.append(imagefile)
                            #else:
                                #print("Whoa! " + imagefile + " didn't match " +checkfile)
                    break
            # Count how many accepted frames fall inside the stimulus window.
            numgood = 0
            for index in pick['acceptable_frames']:
                if index >= stimbegin and index <= stimend:
                    numgood += 1
            #print("There are: "+str(len(images_to_fix))+" images to fix")
            print("There are: "+str(numgood)+" image indices within the stimulus duration...")
            # Only proceed when there are images AND at least 60% of the
            # stimulus-window frames were accepted by registration.
            if images_to_fix and numgood >= (stimend-stimbegin)*0.6:
                print("Enough to continue the pipeline.")
                print("Using DMP file: " + thisfile)
                minmaxpix = np.zeros((len(strip_translation_info), 2))
                # print minmaxpix.shape
                i = 0
                # Record the first/last reference rows each frame's strips span.
                for frame in strip_translation_info:
                    if len(frame) > 0:
                        ref_pixels = frame[0]['slow_axis_pixels_in_current_frame_interpolated']
                        minmaxpix[i, 0] = ref_pixels[0]
                        minmaxpix[i, 1] = ref_pixels[-1]
                        i += 1
                # print minmaxpix[:, 1].min()
                # print minmaxpix[:, 0].max()
                topmostrow = minmaxpix[:, 0].max()
                bottommostrow = minmaxpix[:, 1].min()
                # print np.array([pick['strip_cropping_ROI_2'][-1]])
                # The first row is the crop ROI.
                # np.savetxt(pickle_path[0:-4] + "_transforms.csv", np.array([pick['strip_cropping_ROI_2'][-1]]),
                #            delimiter=",", newline="\n", fmt="%f")
                # Three rows per frame: reference rows, column shifts, row shifts.
                shift_array = np.zeros([len(strip_translation_info)*3, 1000])
                shift_ind = 0
                for frame in strip_translation_info:
                    if len(frame) > 0:
                        # print "************************ Frame " + str(frame[0]['frame_index'] + 1) + "************************"
                        # print "Adjusting the rows...."
                        frame_ind = frame[0]['frame_index']
                        ff_row_shift = ff_translation_info_rowshift[frame_ind]
                        ff_col_shift = ff_translation_info_colshift[frame_ind]
                        # First set the relative shifts
                        row_shift = (np.subtract(frame[0]['slow_axis_pixels_in_reference_frame'],
                                                 frame[0]['slow_axis_pixels_in_current_frame_interpolated']))
                        col_shift = (frame[0]['fast_axis_pixels_in_reference_frame_interpolated'])
                        # These will contain all of the motion, not the relative motion between the aligned frames-
                        # So then subtract the full frame row shift
                        row_shift = np.add(row_shift, ff_row_shift)
                        col_shift = np.add(col_shift, ff_col_shift)
                        shift_array[shift_ind*3, 0:len(frame[0]['slow_axis_pixels_in_reference_frame'])] = frame[0]['slow_axis_pixels_in_reference_frame']
                        shift_array[shift_ind*3+1, 0:len(col_shift)] = col_shift
                        shift_array[shift_ind*3+2, 0:len(row_shift)] = row_shift
                        shift_ind += 1
                # progo.configure("Extracted the eye motion from the dmp file...")
                # Repair the confocal image first: it defines the crop region
                # that the other modalities must share.
                for image in images_to_fix:
                    if "confocal" in image:
                        print("Removing distortion from: "+image +"...")
                        anchorfile = mat_engi.Eye_Motion_Distortion_Repair_Pipl(image_folder_path, image, pick['strip_cropping_ROI_2'][-1],
                                                                                shift_array.tolist(), static_distortion,nargout=3)
                        writtenfile = anchorfile[0:2]
                        cropregion = anchorfile[2]
                for image in images_to_fix:
                    if "confocal" not in image:
                        print("Removing distortion from: "+image +"...")
                        anchorfile = mat_engi.Eye_Motion_Distortion_Repair_Pipl(image_folder_path, image, pick['strip_cropping_ROI_2'][-1],
                                                                                shift_array.tolist(), static_distortion, cropregion, nargout=3)
                np.savetxt(os.path.join(writtenfile[1], thisfile[0:-4] + "_repaired_acceptable_frames.csv"),
                           pick['acceptable_frames'],
                           delimiter=',', fmt='%f')
                if "confocal" in writtenfile[0]:
                    print("Culling excess frames from: " + writtenfile[0] + "...")
                    try:
                        writtenfile = mat_engi.Densitometry_Automatic_Frame_Culler_Pipl(writtenfile[0], writtenfile[1], nargout=2)
                        fixed_images += [ writtenfile[0] ]
                        THEpath = writtenfile[1]
                    except(RuntimeError) as err:
                        print(err)
                        print("Failed to process video: " + writtenfile[0] + "!")
            else:
                print("Accompanying AVI not found, or not enough images in stimulus region to continue the pipeline.")
        except(ValueError, RuntimeError) as err:
            print(err)
            tkMessageBox.showwarning("DMP failed to process.",
                                     "Failed to process DMP (" + thisfile + ")! This file may be corrupted. Re-process the DMP, or contact your local RFC.")
# mat_engi.input_test(fixed_images, nargout=0)
# Final step: align all repaired confocal trials to a common reference.
print("Relativizing trials...")
mat_engi.Relativize_Trials_Pipl(fixed_images, THEpath, nargout=0)
root.destroy()
# shiftT = np.transpose(shift_array)
# transhandle = open(pickle_path[0:-4] + "_transforms.csv", 'w')
# np.savetxt(transhandle, shift_array, delimiter=',', fmt='%f')
# transhandle.close()
c84c24df12bf39cb6c481316b8cb1849df74cbce | 1,891 | py | Python | fixture/application.py | KarpikovaSV/python_training | 7dac017d3120d7a5b832fad64858ae1a2d7bf355 | [
"Apache-2.0"
] | null | null | null | fixture/application.py | KarpikovaSV/python_training | 7dac017d3120d7a5b832fad64858ae1a2d7bf355 | [
"Apache-2.0"
] | null | null | null | fixture/application.py | KarpikovaSV/python_training | 7dac017d3120d7a5b832fad64858ae1a2d7bf355 | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
    """Test fixture wrapping a Selenium WebDriver plus page-object helpers.

    Owns the browser session for the addressbook application under test and
    exposes helper fixtures (`session`, `group`, `contact`) bound to it.
    """

    def __init__(self, browser, base_url):
        """Start the requested browser and attach the helper fixtures.

        Args:
            browser: one of "firefox", "chrome" or "safari".
            base_url: root URL of the addressbook application under test.

        Raises:
            ValueError: if `browser` is not one of the supported names.
        """
        if browser == "firefox":
            self.wd = webdriver.Firefox()
        elif browser == "chrome":
            self.wd = webdriver.Chrome()
        elif browser == "safari":
            self.wd = webdriver.Safari()
        else:
            raise ValueError("Unrecognized browser %s" % browser)
        # Implicit wait is required: without this delay the tests are flaky.
        self.wd.implicitly_wait(3)
        self.session = SessionHelper(self)
        self.group = GroupHelper(self)
        self.contact = ContactHelper(self)
        self.base_url = base_url

    def is_valid(self):
        """Return True if the driver session is still usable.

        Probing `current_url` raises once the browser has been closed.
        """
        try:
            self.wd.current_url
            return True
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are not silently swallowed.
        except Exception:
            return False

    def open_home_page(self):
        """Navigate to the app's index page unless already there."""
        wd = self.wd
        if not wd.current_url.endswith("/index.php"):
            wd.get(self.base_url)

    def open_page(self):
        """Navigate to the edit page unless it is already open.

        "Already open" means the URL ends with /edit.php AND the "photo"
        field is present (the original precedence-heavy condition
        `not (A and len(...)) > 0` reduced to exactly this).
        """
        wd = self.wd
        if not (wd.current_url.endswith("/edit.php")
                and len(wd.find_elements_by_name("photo")) > 0):
            wd.get(self.base_url + "/edit.php")

    def is_element_present(self, how, what):
        """Return True if an element located by (`how`, `what`) exists."""
        try:
            self.wd.find_element(by=how, value=what)
        except NoSuchElementException:
            return False
        return True

    def is_alert_present(self):
        """Return True if a JavaScript alert is currently displayed."""
        try:
            # NOTE(review): `switch_to_alert()` is deprecated in newer
            # Selenium in favor of `switch_to.alert`; kept for
            # compatibility with the pinned selenium version.
            self.wd.switch_to_alert()
        except NoAlertPresentException:
            return False
        return True

    def destroy(self):
        """Shut down the browser and end the WebDriver session."""
        self.wd.quit()
1243a357568e1d11635346dd1d1d53b40334e2c8 | 178,848 | py | Python | jax/lax/lax.py | cclauss/jax | f17f2b976f05f218776fdb881797f3012a85c539 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/lax/lax.py | cclauss/jax | f17f2b976f05f218776fdb881797f3012a85c539 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/lax/lax.py | cclauss/jax | f17f2b976f05f218776fdb881797f3012a85c539 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
import itertools
import operator
import string
import warnings
import six
from six.moves import builtins, xrange
import numpy as onp
from ..util import partial, prod
from .. import core
from .. import ad_util
from .. import api
from .. import linear_util as lu
from ..config import flags
from ..core import Primitive
from ..abstract_arrays import (UnshapedArray, ShapedArray, ConcreteArray,
array_types, make_shaped_array, raise_to_shaped)
from ..api_util import (pytree_fun_to_jaxtupletree_fun, pytree_to_jaxtupletree,
pytree_fun_to_flatjaxtuple_fun, pytree_to_flatjaxtuple)
from ..interpreters import partial_eval as pe
from ..interpreters import xla
from ..interpreters import pxla
from ..interpreters import ad
from ..interpreters import batching
from ..util import curry, memoize, safe_zip, unzip2, prod
from ..tree_util import build_tree, tree_unflatten, tree_map
from ..lib import xla_bridge
from ..lib import xla_client
FLAGS = flags.FLAGS
_max = builtins.max
_min = builtins.max
_reduce = six.moves.reduce
@memoize
def broadcast_shapes(*shapes):
  """Returns the shape that results from NumPy broadcasting of `shapes`."""
  if len(shapes) == 1:
    return shapes[0]
  # Left-pad every shape with 1s so all shapes share the same rank.
  ndim = _max(len(shape) for shape in shapes)
  shapes = onp.array([(1,) * (ndim - len(shape)) + shape for shape in shapes])
  min_shape = onp.min(shapes, axis=0)
  max_shape = onp.max(shapes, axis=0)
  # A zero-extent dimension broadcasts to zero; otherwise to the max extent.
  result_shape = onp.where(min_shape == 0, 0, max_shape)
  # Each dimension must either equal the result extent or be 1.
  if not onp.all((shapes == result_shape) | (shapes == 1)):
    raise ValueError("Incompatible shapes for broadcasting: {}"
                     .format(tuple(map(tuple, shapes))))
  return tuple(result_shape)
def _canonicalize_shape(shape):
"""Canonicalizes and checks for errors in a user-provided shape value.
Args:
shape: a Python value that represents a shape.
Returns:
A tuple of integers.
"""
try:
return tuple(map(operator.index, shape))
except TypeError:
pass
msg = ("Shapes must be 1D sequences of concrete values of integer type, "
"got {}")
raise TypeError(msg.format(shape))
def _identity(x): return x
### traceables
def neg(x):
r"""Elementwise negation: :math:`-x`."""
return neg_p.bind(x)
def sign(x):
r"""Elementwise sign.
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
-0 & x = -0\\
\mathit{NaN} & x = \mathit{NaN}\\
+0 & x = +0\\
1 & x > 0
\end{cases}`.
"""
return sign_p.bind(x)
def floor(x):
r"""Elementwise floor: :math:`\left\lfloor x \right\rfloor`."""
return floor_p.bind(x)
def ceil(x):
r"""Elementwise ceiling: :math:`\left\lceil x \right\rceil`."""
return ceil_p.bind(x)
def round(x):
r"""Elementwise round.
Rounds values to the nearest integer. Halfway values (e.g., `0.5`) are rounded
away from zero."""
return round_p.bind(x)
def is_finite(x):
r"""Elementwise :math:`\mathrm{isfinite}`.
For each element x returns `True` if and only if x is not :math:`\pm\infty` or
:math:`\mathit{NaN}`.
"""
return is_finite_p.bind(x)
def exp(x):
r"""Elementwise exponential: :math:`e^x`."""
return exp_p.bind(x)
def expm1(x):
r"""Elementwise :math:`e^{x - 1}`."""
return expm1_p.bind(x)
def log(x):
r"""Elementwise natural logarithm: :math:`\mathrm{log}(x)`."""
return log_p.bind(x)
def log1p(x):
r"""Elementwise :math:`\mathrm{log}(1 + x)`."""
return log1p_p.bind(x)
def tanh(x):
r"""Elementwise hyperbolic tangent: :math:`\mathrm{tanh}(x)`."""
return tanh_p.bind(x)
def sin(x):
r"""Elementwise sine: :math:`\mathrm{sin}(x)`."""
return sin_p.bind(x)
def cos(x):
r"""Elementwise cosine: :math:`\mathrm{cos}(x)`."""
return cos_p.bind(x)
def atan2(x, y):
r"""Elementwise arc tangent of two variables:
:math:`\mathrm{atan}({x \over y})`."""
return atan2_p.bind(x, y)
def lgamma(x):
r"""Elementwise log gamma: :math:`\mathrm{log}(\Gamma(x))`."""
return lgamma_p.bind(x)
def digamma(x):
r"""Elementwise digamma: :math:`\psi(x)`."""
return digamma_p.bind(x)
def erf(x):
r"""Elementwise error function: :math:`\mathrm{erf}(x)`."""
return erf_p.bind(x)
def erfc(x):
r"""Elementwise complementary error function:
:math:`\mathrm{erfc}(x) = 1 - \mathrm{erf}(x)`."""
return erfc_p.bind(x)
def erf_inv(x):
r"""Elementwise inverse error function: :math:`\mathrm{erf}^{-1}(x)`."""
return erf_inv_p.bind(x)
def real(x):
r"""Elementwise extract real part: :math:`\mathrm{Re}(x)`.
Returns the real part of a complex number.
"""
return real_p.bind(x)
def imag(x):
r"""Elementwise extract imaginary part: :math:`\mathrm{Im}(x)`.
Returns the imaginary part of a complex number.
"""
return imag_p.bind(x)
def complex(x, y):
r"""Elementwise make complex number: :math:`x + jy`.
Builds a complex number from real and imaginary parts.
"""
return complex_p.bind(_brcast(x, y), _brcast(y, x))
def conj(x):
r"""Elementwise complex conjugate function: :math:`\overline{x}`."""
return conj_p.bind(x, input_dtype=_dtype(x))
def abs(x):
r"""Elementwise absolute value: :math:`|x|`."""
return abs_p.bind(x)
def pow(x, y):
r"""Elementwise power: :math:`x^y`."""
return pow_p.bind(x, y)
def bitwise_not(x):
r"""Elementwise NOT: :math:`\neg x`."""
return not_p.bind(x)
def bitwise_and(x, y):
r"""Elementwise AND: :math:`x \wedge y`."""
return and_p.bind(x, y)
def bitwise_or(x, y):
r"""Elementwise OR: :math:`x \vee y`."""
return or_p.bind(x, y)
def bitwise_xor(x, y):
r"""Elementwise exclusive OR: :math:`x \oplus y`."""
return xor_p.bind(x, y)
def add(x, y):
r"""Elementwise addition: :math:`x + y`."""
return add_p.bind(x, y)
def sub(x, y):
r"""Elementwise subtraction: :math:`x - y`."""
return sub_p.bind(x, y)
def mul(x, y):
r"""Elementwise multiplication: :math:`x \times y`."""
return mul_p.bind(x, y)
def div(x, y):
r"""Elementwise division: :math:`x \over y`."""
return div_p.bind(x, y)
def rem(x, y):
r"""Elementwise remainder: :math:`x \bmod y`."""
return rem_p.bind(x, y)
def max(x, y):
r"""Elementwise maximum: :math:`\mathrm{max}(x, y)`
For complex numbers, uses a lexicographic comparison on the
`(real, imaginary)` pairs."""
return max_p.bind(x, y)
def min(x, y):
r"""Elementwise minimum: :math:`\mathrm{min}(x, y)`
For complex numbers, uses a lexicographic comparison on the
`(real, imaginary)` pairs."""
return min_p.bind(x, y)
def shift_left(x, y):
r"""Elementwise left shift: :math:`x \ll y`."""
return shift_left_p.bind(x, y)
def shift_right_arithmetic(x, y):
r"""Elementwise arithmetic right shift: :math:`x \gg y`."""
return shift_right_arithmetic_p.bind(x, y)
def shift_right_logical(x, y):
r"""Elementwise logical right shift: :math:`x \gg y`."""
return shift_right_logical_p.bind(x, y)
def eq(x, y):
r"""Elementwise equals: :math:`x = y`."""
return eq_p.bind(x, y)
def ne(x, y):
r"""Elementwise not-equals: :math:`x \neq y`."""
return ne_p.bind(x, y)
def ge(x, y):
r"""Elementwise greater-than-or-equals: :math:`x \geq y`."""
return ge_p.bind(x, y)
def gt(x, y):
r"""Elementwise greater-than: :math:`x > y`."""
return gt_p.bind(x, y)
def le(x, y):
r"""Elementwise less-than-or-equals: :math:`x \leq y`."""
return le_p.bind(x, y)
def lt(x, y):
r"""Elementwise less-than: :math:`x < y`."""
return lt_p.bind(x, y)
def convert_element_type(operand, new_dtype):
  """Elementwise cast.

  Wraps XLA's `ConvertElementType
  <https://www.tensorflow.org/xla/operation_semantics#convertelementtype>`_
  operator, which performs an elementwise conversion from one type to another.
  Similar to a C++ `static_cast`.

  Args:
    operand: an array or scalar value to be cast
    new_dtype: the new type. Should be a NumPy type.

  Returns:
    An array with the same shape as `operand`, cast elementwise to `new_dtype`.
  """
  new_dtype = xla_bridge.canonicalize_dtype(new_dtype)
  old_dtype = _dtype(operand)
  if old_dtype != new_dtype:
    if (onp.issubdtype(old_dtype, onp.complexfloating) and
        not onp.issubdtype(new_dtype, onp.complexfloating)):
      # Mirror NumPy: complex -> real casts drop the imaginary part with a
      # ComplexWarning instead of raising; take the real part explicitly
      # before handing off to the primitive.
      msg = "Casting complex values to real discards the imaginary part"
      warnings.warn(msg, onp.ComplexWarning)
      operand = real(operand)
      old_dtype = _dtype(operand)
    return convert_element_type_p.bind(
        operand, new_dtype=new_dtype, old_dtype=old_dtype)
  else:
    # No-op cast: return the operand unchanged to avoid emitting an HLO op.
    return operand
def bitcast_convert_type(operand, new_dtype):
"""Elementwise bitcast.
Wraps XLA's `BitcastConvertType
<https://www.tensorflow.org/xla/operation_semantics#bitcastconverttype>`_
operator, which performs a bit cast from one type to another. The bitwidth
of the source and destination types must match.
Args:
operand: an array or scalar value to be cast
new_dtype: the new type. Should be a NumPy type.
Returns:
An array with the same shape as `operand`, bitcast elementwise to
`new_dtype`.
"""
new_dtype = xla_bridge.canonicalize_dtype(new_dtype)
old_dtype = _dtype(operand)
if old_dtype != new_dtype:
return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
else:
return operand
def clamp(min, x, max):
r"""Elementwise clamp.
Returns :math:`\mathrm{clamp}(x) = \begin{cases}
\mathit{min} & \text{if } x < \mathit{min},\\
\mathit{max} & \text{if } x > \mathit{max},\\
x & \text{otherwise}
\end{cases}`.
"""
return clamp_p.bind(min, x, max)
def concatenate(operands, dimension):
"""Concatenates a sequence of arrays along `dimension`.
Wraps XLA's `Concatenate
<https://www.tensorflow.org/xla/operation_semantics#concatenate>`_
operator.
Args:
operands: a sequence of arrays to concatenate. The arrays must have equal
shapes, except in the `dimension` axis.
dimension: the dimension along which to concatenate the arrays.
Returns:
An array containing the concatenation.
"""
return concatenate_p.bind(*operands, dimension=dimension,
operand_shapes=tuple(o.shape for o in operands))
Precision = xla_client.PrecisionConfig.Precision
def conv_general_dilated(lhs, rhs, window_strides, padding, lhs_dilation=None,
rhs_dilation=None, dimension_numbers=None,
feature_group_count=1, precision=None):
"""General n-dimensional convolution operator, with optional dilation.
Wraps XLA's `Conv
<https://www.tensorflow.org/xla/operation_semantics#conv_convolution>`_
operator.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
is also known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
dimension_numbers: either `None`, a `ConvDimensionNumbers` object, or
a 3-tuple `(lhs_spec, rhs_spec, out_spec)`, where each element is a string
of length `n+2`.
feature_group_count: integer, default 1. See XLA HLO docs.
precision: Optional. Either `None`, which means the default precision for
the backend, or a `Precision` enum value.
Returns:
An array containing the convolution result.
In the string case of `dimension_numbers`, each character identifies by
position:
- the batch dimensions in `lhs`, `rhs`, and the output with the character
'N',
- the feature dimensions in `lhs` and the output with the character 'C',
- the input and output feature dimensions in rhs with the characters 'I'
and 'O' respectively, and
- spatial dimension correspondences between lhs, rhs, and the output using
any distinct characters.
For example, to indicate dimension numbers consistent with the `conv` function
with two spatial dimensions, one could use `('NCHW', 'OIHW', 'NCHW')`. As
another example, to indicate dimension numbers consistent with the TensorFlow
Conv2D operation, one could use `('NHWC', 'HWIO', 'NHWC')`. When using the
latter form of convolution dimension specification, window strides are
associated with spatial dimension character labels according to the order in
which the labels appear in the `rhs_spec` string, so that `window_strides[0]`
is matched with the dimension corresponding to the first character
appearing in rhs_spec that is not `'I'` or `'O'`.
If `dimension_numbers` is `None`, the default is `('NCHW', 'OIHW', 'NCHW')`
(for a 2D convolution).
"""
if type(dimension_numbers) is not ConvDimensionNumbers:
dimension_numbers = conv_dimension_numbers(
lhs.shape, rhs.shape, dimension_numbers)
if isinstance(padding, str):
lhs_perm, rhs_perm, _ = dimension_numbers
padding = padtype_to_pads(
onp.take(lhs.shape, lhs_perm)[2:], onp.take(rhs.shape, rhs_perm)[2:],
window_strides, padding)
if lhs_dilation is None:
lhs_dilation = (1,) * (lhs.ndim - 2)
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
return conv_general_dilated_p.bind(
lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),
lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
lhs_shape=lhs.shape, rhs_shape=rhs.shape,
precision=_canonicalize_precision(precision))
def dot(lhs, rhs, precision=None):
"""Vector/vector, matrix/vector, and matrix/matrix multiplication.
Wraps XLA's `Dot
<https://www.tensorflow.org/xla/operation_semantics#dot>`_
operator.
For more general contraction, see the `dot_general` operator.
Args:
lhs: an array of rank 1 or 2.
rhs: an array of rank 1 or 2.
precision: Optional. Either `None`, which means the default precision for
the backend, or a `Precision` enum value.
Returns:
An array containing the product.
"""
# TODO(b/134526360): XLA doesn't support integer dots, so we emit a sum of
# products instead.
if onp.issubdtype(lhs.dtype, onp.integer):
lhs_shape = onp.shape(lhs)
lhs_ndim = len(lhs_shape)
rhs_ndim = onp.ndim(rhs)
if rhs_ndim > 1:
lhs = broadcast_in_dim(lhs, lhs_shape + (1,), tuple(range(len(lhs_shape))))
if lhs_ndim > 1:
rhs = broadcast(rhs, (1,))
return reduce(mul(lhs, rhs), _zero(lhs), add, (len(lhs_shape) - 1,))
return dot_p.bind(lhs, rhs, precision=_canonicalize_precision(precision))
def dot_general(lhs, rhs, dimension_numbers, precision=None):
"""More general contraction operator.
Wraps XLA's `DotGeneral
<https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_
operator.
Args:
lhs: an array
rhs: an array
dimension_numbers: a tuple of tuples of the form
`((lhs_contracting_dims, rhs_contracting_dims),
(lhs_batch_dims, rhs_batch_dims))`
precision: Optional. Either `None`, which means the default precision for
the backend, or a `Precision` enum value.
Returns:
An array containing the result.
"""
contract_dims, batch_dims = dimension_numbers
contract_dims = tuple(map(tuple, contract_dims))
batch_dims = tuple(map(tuple, batch_dims))
if onp.issubdtype(lhs.dtype, onp.integer):
# TODO(b/134526360): XLA doesn't support integer dots, so we emit a sum of
# products instead.
lhs_contract_dims, rhs_contract_dims = contract_dims
lhs_batch_dims, rhs_batch_dims = batch_dims
lhs_noncontract_dims = tuple(sorted(
set(range(onp.ndim(lhs))) - set(lhs_batch_dims) - set(lhs_contract_dims)))
rhs_noncontract_dims = tuple(sorted(
set(range(onp.ndim(rhs))) - set(rhs_batch_dims) - set(rhs_contract_dims)))
lhs = transpose(lhs,
lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims)
rhs = transpose(rhs,
rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims)
new_lhs_shape = onp.insert(
onp.shape(lhs), len(lhs_batch_dims) + len(lhs_noncontract_dims),
(1,) * len(rhs_noncontract_dims))
new_rhs_shape = onp.insert(onp.shape(rhs), len(lhs_batch_dims),
(1,) * len(lhs_noncontract_dims))
lhs = reshape(lhs, new_lhs_shape)
rhs = reshape(rhs, new_rhs_shape)
out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) +
len(rhs_noncontract_dims))
return reduce(mul(lhs, rhs), _zero(lhs), add,
tuple(range(out_ndim, out_ndim + len(lhs_contract_dims))))
return dot_general_p.bind(lhs, rhs,
dimension_numbers=(contract_dims, batch_dims),
precision=_canonicalize_precision(precision))
def broadcast(operand, sizes):
"""Broadcasts an array, adding new major dimensions.
Wraps XLA's `Broadcast
<https://www.tensorflow.org/xla/operation_semantics#broadcast>`_
operator.
Args:
operand: an array
sizes: a sequence of integers, giving the sizes of new major dimensions
to add.
Returns:
An array containing the result.
"""
return broadcast_p.bind(operand, sizes=tuple(sizes))
def broadcast_in_dim(operand, shape, broadcast_dimensions):
if operand.ndim == len(shape) and not len(broadcast_dimensions):
return operand
if any(x < 0 or x >= len(shape) for x in broadcast_dimensions):
msg = ("broadcast dimensions must be >= 0 and < ndim(shape), got {} for "
"shape {}")
raise ValueError(msg.format(broadcast_dimensions, shape))
return broadcast_in_dim_p.bind(
operand, shape=tuple(shape),
broadcast_dimensions=tuple(broadcast_dimensions))
def reshape(operand, new_sizes, dimensions=None):
"""Wraps XLA's `Reshape
<https://www.tensorflow.org/xla/operation_semantics#reshape>`_
operator.
"""
new_sizes = _canonicalize_shape(new_sizes)
same_shape = onp.shape(operand) == new_sizes
same_dims = dimensions is None or tuple(dimensions) == tuple(range(onp.ndim(operand)))
if onp.shape(operand) and same_shape and same_dims:
return operand
else:
return reshape_p.bind(
operand, new_sizes=new_sizes,
dimensions=None if same_dims else tuple(dimensions),
old_sizes=onp.shape(operand))
def pad(operand, padding_value, padding_config):
"""Wraps XLA's `Pad
<https://www.tensorflow.org/xla/operation_semantics#pad>`_
operator.
"""
return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))
def rev(operand, dimensions):
"""Wraps XLA's `Rev
<https://www.tensorflow.org/xla/operation_semantics#rev_reverse>`_
operator.
"""
return rev_p.bind(operand, dimensions=tuple(dimensions))
def select(pred, on_true, on_false):
"""Wraps XLA's `Select
<https://www.tensorflow.org/xla/operation_semantics#select>`_
operator.
"""
return select_p.bind(pred, on_true, on_false)
def slice(operand, start_indices, limit_indices, strides=None):
"""Wraps XLA's `Slice
<https://www.tensorflow.org/xla/operation_semantics#slice>`_
operator.
"""
if (onp.all(onp.equal(start_indices, 0))
and onp.all(onp.equal(limit_indices, operand.shape))
and strides is None):
return operand
else:
return slice_p.bind(operand, start_indices=tuple(start_indices),
limit_indices=tuple(limit_indices),
strides=None if strides is None else tuple(strides),
operand_shape=operand.shape)
def dynamic_slice(operand, start_indices, slice_sizes):
"""Wraps XLA's `DynamicSlice
<https://www.tensorflow.org/xla/operation_semantics#dynamicslice>`_
operator.
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_slice_p.bind(
operand, start_indices, slice_sizes=tuple(slice_sizes),
operand_shape=operand.shape)
def dynamic_update_slice(operand, update, start_indices):
"""Wraps XLA's `DynamicUpdateSlice
<https://www.tensorflow.org/xla/operation_semantics#dynamicupdateslice>`_
operator.
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_update_slice_p.bind(operand, update, start_indices,
update_shape=update.shape)
def gather(operand, start_indices, dimension_numbers, slice_sizes):
"""Gather operator.
Wraps `XLA's Gather operator
<https://www.tensorflow.org/xla/operation_semantics#gather>`_.
The semantics of gather are complicated, and its API might change in the
future. For most use cases, you should prefer `Numpy-style indexing
<https://docs.scipy.org/doc/numpy-1.16.0/reference/arrays.indexing.html>`_
(e.g., `x[:, (1,4,7), ...]`), rather than using `gather` directly.
Args:
operand: an array from which slices should be taken
start_indices: the indices at which slices should be taken
dimension_numbers: a `lax.GatherDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices` and the output relate.
slice_sizes: the size of each slice. Must be a sequence of non-negative
integers with size equal to `ndim(operand)`.
Returns:
An array containing the gather output.
"""
return gather_p.bind(
operand, start_indices, dimension_numbers=dimension_numbers,
slice_sizes=_canonicalize_shape(slice_sizes), operand_shape=operand.shape)
def scatter_add(operand, scatter_indices, updates, dimension_numbers):
"""Scatter-add operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
addition is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
Returns:
An array containing the sum of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(add, _const(operand, 0))
return scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
updates_shape=updates.shape)
def scatter_min(operand, scatter_indices, updates, dimension_numbers):
"""Scatter-min operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
the `min` function is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
Returns:
An array containing the sum of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(min, _const(operand, 0))
return scatter_min_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
updates_shape=updates.shape)
def scatter_max(operand, scatter_indices, updates, dimension_numbers):
"""Scatter-max operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
the `max` function is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
Returns:
An array containing the sum of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(max, _const(operand, 0))
return scatter_max_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
updates_shape=updates.shape)
def scatter(operand, scatter_indices, updates, dimension_numbers):
"""Scatter-update operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where updates
replace values from `operand`.
If multiple updates are performed to the same index of operand, they may be
applied in any order.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
Returns:
An array containing the sum of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(lambda x, y: y, _const(operand, 0))
return scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
updates_shape=updates.shape)
def index_take(src, idxs, axes):
indices = concatenate([reshape(i, [i.shape[0], 1]) for i in idxs], 1)
indices = indices % onp.array([src.shape[ax] for ax in axes])
slice_sizes = list(src.shape)
for ax in axes:
slice_sizes[ax] = 1
slice_sizes = tuple(slice_sizes)
offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=axes,
start_index_map=axes)
return gather(src, indices, dimension_numbers=dnums, slice_sizes=slice_sizes)
def transpose(operand, permutation):
"""Wraps XLA's `Transpose
<https://www.tensorflow.org/xla/operation_semantics#transpose>`_
operator.
"""
permutation = tuple(permutation)
if permutation == tuple(range(len(permutation))):
return operand
else:
return transpose_p.bind(operand, permutation=permutation)
def reduce(operand, init_value, computation, dimensions):
"""Wraps XLA's `Reduce
<https://www.tensorflow.org/xla/operation_semantics#reduce>`_
operator.
"""
monoid_reducer = _get_monoid_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, dimensions)
else:
jaxpr, consts = _reduction_jaxpr(computation, init_value)
return reduce_p.bind(operand, init_value, computation=computation,
jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
def _reduction_jaxpr(computation, init_value):
  """Trace `computation` into a jaxpr for use as a reduction combiner.

  Args:
    computation: a binary function of two scalar values.
    init_value: an example value; both arguments of `computation` are traced
      at this value's abstract type (note the `(pval, pval)` below).

  Returns:
    A `(jaxpr, consts)` pair suitable for the reduction primitives' params.
  """
  pval = _abstractify(init_value)
  jaxpr, _, consts = pe.trace_unwrapped_to_jaxpr(computation, (pval, pval),
                                                 instantiate=False)
  return jaxpr, consts
def _get_monoid_reducer(monoid_op, x):
  """If `x` is the identity of `monoid_op`, return the specialized reducer.

  Returns a falsy value (None or False) when no monoid specialization
  applies, in which case `reduce` falls back to the generic `reduce_p` path.
  The `val == identity and reducer` pattern returns the reducer function
  only when the concrete init value matches the monoid identity.
  """
  aval = core.get_aval(x)
  dtype = _dtype(x)
  if (type(aval) is ConcreteArray) and aval.shape == ():
    if monoid_op is add:
      return aval.val == 0 and _reduce_sum
    # NOTE(review): the `elif`s below chain off this `mul` test, not the
    # `add` one above; behavior is unaffected because the `add` branch
    # returns, but the mixed if/elif structure is easy to misread.
    if monoid_op is mul:
      return aval.val == 1 and _reduce_prod
    elif monoid_op is bitwise_or and dtype == onp.bool_:
      return aval.val == _get_max_identity(dtype) and _reduce_or
    elif monoid_op is bitwise_and and dtype == onp.bool_:
      return aval.val == _get_min_identity(dtype) and _reduce_and
    elif monoid_op is max:
      return aval.val == _get_max_identity(dtype) and _reduce_max
    elif monoid_op is min:
      return aval.val == _get_min_identity(dtype) and _reduce_min
def _get_max_identity(dtype):
if onp.issubdtype(dtype, onp.inexact):
return onp.array(-onp.inf, dtype)
elif onp.issubdtype(dtype, onp.integer):
return onp.array(onp.iinfo(dtype).min, dtype)
elif onp.issubdtype(dtype, onp.bool_):
return onp.array(False, onp.bool_)
def _get_min_identity(dtype):
if onp.issubdtype(dtype, onp.inexact):
return onp.array(onp.inf, dtype)
elif onp.issubdtype(dtype, onp.integer):
return onp.array(onp.iinfo(dtype).max, dtype)
elif onp.issubdtype(dtype, onp.bool_):
return onp.array(True, onp.bool_)
def _reduce_sum(operand, axes):
return reduce_sum_p.bind(operand, axes=tuple(axes),
input_shape=onp.shape(operand))
def _reduce_prod(operand, axes):
return reduce_prod_p.bind(operand, axes=tuple(axes))
def _reduce_max(operand, axes):
return reduce_max_p.bind(operand, axes=tuple(axes))
def _reduce_min(operand, axes):
return reduce_min_p.bind(operand, axes=tuple(axes))
def _reduce_or(operand, axes):
return reduce_or_p.bind(operand, axes=tuple(axes))
def _reduce_and(operand, axes):
return reduce_and_p.bind(operand, axes=tuple(axes))
def reduce_window(operand, init_value, computation, window_dimensions,
window_strides, padding):
"""Wraps XLA's `ReduceWindow
<https://www.tensorflow.org/xla/operation_semantics#reducewindow>`_
operator.
"""
monoid_reducer = _get_monoid_window_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, window_dimensions, window_strides, padding)
else:
jaxpr, consts = _reduction_jaxpr(computation, init_value)
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _get_monoid_window_reducer(monoid_op, x):
  """If `x` is the identity of `monoid_op`, return a specialized window reducer.

  Returns a falsy value (None or False) when no specialization applies;
  `reduce_window` then falls back to the generic `reduce_window_p` path.
  """
  aval = core.get_aval(x)
  if (type(aval) is ConcreteArray) and aval.shape == ():
    if monoid_op is add:
      return aval.val == 0 and _reduce_window_sum
    elif monoid_op is max:
      return aval.val == _get_max_identity(aval.dtype) and _reduce_window_max
    elif monoid_op is min:
      return aval.val == _get_min_identity(aval.dtype) and _reduce_window_min
def _reduce_window_sum(operand, window_dimensions, window_strides, padding):
return reduce_window_sum_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding,
input_shape=operand.shape)
def _reduce_window_prod(operand, window_dimensions, window_strides, padding):
init_value = _const(operand, 1)
jaxpr, consts = _reduction_jaxpr(mul, init_value)
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _reduce_window_max(operand, window_dimensions, window_strides, padding):
return reduce_window_max_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _reduce_window_min(operand, window_dimensions, window_strides, padding):
return reduce_window_min_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _select_and_scatter(operand, select, window_dimensions, window_strides,
                        padding, source, init_value, scatter):
  """Binds the general select-and-scatter primitive."""
  # NOTE(review): `_reduction_jaxpr` is called here with a single argument,
  # but elsewhere in this file it is called as
  # `_reduction_jaxpr(computation, init_value)` -- this path looks stale or
  # unused; confirm before relying on it.
  select_jaxpr, select_consts = _reduction_jaxpr(select)
  scatter_jaxpr, scatter_consts = _reduction_jaxpr(scatter)
  return select_and_scatter_p.bind(
      operand, source, init_value, select_jaxpr=select_jaxpr,
      select_consts=select_consts, scatter_jaxpr=scatter_jaxpr,
      scatter_consts=scatter_consts, window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _select_and_scatter_add(source, operand, select_prim, window_dimensions,
                            window_strides, padding):
  """Select-and-scatter specialized to an addition scatter computation."""
  return select_and_scatter_add_p.bind(
      source, operand, select_prim=select_prim,
      window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _select_and_gather_add(tangents, operand, select_prim, window_dimensions,
                           window_strides, padding):
  """Binds select_and_gather_add_p over windows of `operand` with values drawn
  from `tangents` (semantics defined by the primitive's rules, not here)."""
  return select_and_gather_add_p.bind(
      tangents, operand, select_prim=select_prim,
      window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def sort(operand, dimension=-1):
  """Wraps XLA's `Sort
  <https://www.tensorflow.org/xla/operation_semantics#sort>`_
  operator.
  """
  return sort_p.bind(operand, dimension=dimension)
def sort_key_val(keys, values, dimension=-1):
  """Sorts `keys` along `dimension`, reordering `values` the same way."""
  # TODO(mattjj): new sort_key_val is variadic
  result = sort_key_val_p.bind(keys, values, dimension=dimension)
  sorted_keys, sorted_values = result
  return sorted_keys, sorted_values
def tie_in(x, y):
  # Binds tie_in_p on (x, y); the returned value carries a dependence on `x`
  # (exact semantics are defined by the primitive's rules, not visible here).
  return tie_in_p.bind(x, y)
def shaped_identity(x):
  # Identity that records `x`'s shape as static primitive metadata.
  return shaped_identity_p.bind(x, shape=x.shape)
def full(shape, fill_value, dtype=None):
  """Returns an array of `shape` filled with the scalar `fill_value`.
  Arguments:
    shape: sequence of integers, describing the shape of the output array
    fill_value: the value to fill the new array with
    dtype: the type of the output array, or `None`. If not `None`, `fill_value`
      will be cast to `dtype`.
  """
  try:
    shape = _canonicalize_shape(shape)
  except TypeError:
    msg = ("`full` requires shapes to be concrete. If using `jit`, try using "
           "`static_argnums` or applying `jit` to smaller subfunctions instead.")
    raise TypeError(msg)
  if onp.shape(fill_value):
    msg = "full must be called with scalar fill_value, got fill_value.shape {}."
    raise TypeError(msg.format(onp.shape(fill_value)))
  dtype = xla_bridge.canonicalize_dtype(dtype or _dtype(fill_value))
  # Python scalars, raw ndarrays, and DeviceValues become lazy filled
  # constants; anything else (e.g. tracers) is broadcast explicitly.
  if onp.isscalar(fill_value) or type(fill_value) is onp.ndarray:
    return _FilledConstant(onp.asarray(fill_value, dtype), shape)
  if isinstance(fill_value, xla.DeviceValue):
    return _FilledConstant(onp.asarray(fill_value, dtype), shape)
  return broadcast(convert_element_type(fill_value, dtype), shape)
def iota(dtype, size):
  """Wraps XLA's `Iota
  <https://www.tensorflow.org/xla/operation_semantics#iota>`_
  operator.
  """
  # 1-D case: delegate to the broadcasted form along dimension 0.
  return broadcasted_iota(dtype, (int(size),), 0)
def broadcasted_iota(dtype, shape, dimension):
  """Wraps XLA's `Iota
  <https://www.tensorflow.org/xla/operation_semantics#iota>`_
  operator.
  """
  dtype = xla_bridge.canonicalize_dtype(dtype)
  shape = _canonicalize_shape(shape)
  dimension = int(dimension)
  return _IotaConstant(dtype, shape, dimension)
def eye(dtype, size):
  """Square identity array of shape (size, size)."""
  return broadcasted_eye(dtype, (size, size), (0, 1))
def broadcasted_eye(dtype, shape, axes):
  """Eye-like constant of `shape`, diagonal over the dimensions in `axes`.
  Args:
    dtype: element type of the output.
    shape: full output shape.
    axes: list/tuple of at least two dimension indices.
  Raises:
    TypeError: if `axes` is not a list/tuple of length >= 2.
  """
  if not isinstance(axes, (list, tuple)) or not len(axes) >= 2:
    # Fixed: the message previously blamed `make_diagonal` rather than this
    # function, which made the error hard to trace for callers.
    raise TypeError("broadcasted_eye `axes` must be a tuple with len at least 2.")
  dtype = xla_bridge.canonicalize_dtype(dtype)
  shape = _canonicalize_shape(shape)
  axes = tuple(map(int, axes))
  return _EyeConstant(shape, axes, dtype)
def stop_gradient(x):
  """Stops gradient computation.
  Operationally `stop_gradient` is the identity function, that is, it returns
  argument `x` unchanged. However, `stop_gradient` prevents the flow of
  gradients during forward or reverse-mode automatic differentiation. If there
  are multiple nested gradient computations, `stop_gradient` stops gradients
  for all of them.
  For example:
  >>> jax.grad(lambda x: x**2)(3.)
  array(6., dtype=float32)
  >>> jax.grad(lambda x: jax.lax.stop_gradient(x)**2)(3.)
  array(0., dtype=float32)
  >>> jax.grad(jax.grad(lambda x: x**2))(3.)
  array(2., dtype=float32)
  >>> jax.grad(jax.grad(lambda x: jax.lax.stop_gradient(x)**2))(3.)
  array(0., dtype=float32)
  """
  # Applied leaf-wise so pytrees (nested containers) are supported.
  return tree_map(stop_gradient_p.bind, x)
def _safe_mul(x, y): return safe_mul_p.bind(x, y)  # mul where 0 * anything == 0 (see _safe_mul_translation_rule)
### convenience wrappers around traceables
def conv(lhs, rhs, window_strides, padding, precision=None):
  """Convenience wrapper around `conv_general_dilated`.
  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.
  Returns:
    An array containing the convolution result.
  """
  # Fixed: removed a dead call to `padtype_to_pads` whose result (`pads`) was
  # never used -- `conv_general_dilated` accepts the padding string directly.
  return conv_general_dilated(lhs, rhs, window_strides, padding,
                              precision=precision)
def conv_with_general_padding(lhs, rhs, window_strides, padding,
                              lhs_dilation, rhs_dilation, precision=None):
  """Convenience wrapper around `conv_general_dilated`.
  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
      `n` `(low, high)` integer pairs that give the padding to apply before and
      after each spatial dimension.
    lhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
      is also known as transposed convolution.
    rhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
      is also known as atrous convolution.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.
  Returns:
    An array containing the convolution result.
  """
  # Pure pass-through: exists only to surface the dilation parameters by name.
  return conv_general_dilated(
      lhs, rhs, window_strides, padding, lhs_dilation=lhs_dilation,
      rhs_dilation=rhs_dilation, precision=precision)
def _conv_transpose_padding(k, s, padding):
"""Calculate before and after padding for a dim of transposed convolution.
Args:
k: int: kernel dimension.
s: int: dimension stride value.
padding: 'same' or 'valid' padding mode for original forward conv.
Returns:
2-tuple: ints: before and after padding for transposed convolution.
"""
if padding == 'SAME':
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(onp.ceil(pad_len / 2))
elif padding == 'VALID':
pad_len = k + s - 2 + _max(k - s, 0)
pad_a = k - 1
else:
raise ValueError('Padding mode must be `SAME` or `VALID`.')
pad_b = pad_len - pad_a
return pad_a, pad_b
def _flip_axes(x, axes):
"""Flip ndarray 'x' along each axis specified in axes tuple."""
for axis in axes:
x = onp.flip(x, axis)
return x
def conv_transpose(lhs, rhs, strides, padding, dimension_numbers=None,
                   transpose_kernel=False, precision=None):
  """Convenience wrapper for calculating the N-d convolution "transpose".
  This function directly calculates a fractionally strided conv rather than
  indirectly calculating the gradient (transpose) of a forward convolution.
  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    strides: sequence of `n` integers, sets fractional stride.
    padding: 'SAME', 'VALID' will set as transpose of corresponding forward
      conv, or a sequence of `n` integer 2-tuples describing before-and-after
      padding for each `n` spatial dimension.
    dimension_numbers: tuple of dimension descriptors as in
      lax.conv_general_dilated. Defaults to tensorflow convention.
    transpose_kernel: if True flips spatial axes and swaps the input/output
      channel axes of the kernel. This makes the output of this function identical
      to the gradient-derived functions like keras.layers.Conv2DTranspose
      applied to the same kernel. For typical use in neural nets this is completely
      pointless and just makes input/output channel specification confusing.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.
  Returns:
    Transposed N-d convolution, with output padding following the conventions of
    keras.layers.Conv2DTranspose.
  """
  assert len(lhs.shape) == len(rhs.shape) and len(lhs.shape) > 2
  ndims = len(lhs.shape)
  one = (1,) * (ndims - 2)
  # Set dimensional layout defaults if not specified.
  if dimension_numbers is None:
    if ndims == 3:
      dimension_numbers = ('NHC', 'HIO', 'NHC')
    elif ndims == 4:
      dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
    elif ndims == 5:
      dimension_numbers = ('NHWDC', 'HWDIO', 'NHWDC')
    else:
      # NOTE(review): this branch fires for ndims > 5, so the "4+" wording in
      # the message is misleading -- confirm intended phrasing.
      raise ValueError('No 4+ dimensional dimension_number defaults.')
  dn = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
  k_shape = onp.take(rhs.shape, dn.rhs_spec)
  k_sdims = k_shape[2:]  # spatial kernel dims (first two entries are O, I)
  # Calculate correct output shape given padding and strides.
  if padding in {'SAME', 'VALID'}:
    pads = [_conv_transpose_padding(k, s, padding)
            for k,s in zip(k_sdims.tolist(), strides)]
  else:
    pads = padding
  if transpose_kernel:
    # flip spatial dims and swap input / output channel axes
    rhs = _flip_axes(rhs, onp.array(dn.rhs_spec)[2:])
    rhs = onp.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])
  # Fractional striding = LHS dilation by `strides` with unit window strides.
  return conv_general_dilated(lhs, rhs, one, pads, strides, one, dn,
                              precision=precision)
def full_like(x, fill_value, dtype=None, shape=None):
  """Create a full array like np.full based on the example array `x`.
  Args:
    x: example array-like, used for shape and dtype information.
    fill_value: a scalar value to fill the entries of the output array.
    dtype: optional, a dtype parameter for the output ndarray.
    shape: optional, a shape parameter for the output ndarray.
  Returns:
    An ndarray with the same shape as `x` with its entries set equal to
    `fill_value`, similar to the output of np.full.
  """
  if shape is None:
    out_shape = onp.shape(x)
  else:
    out_shape = _canonicalize_shape(shape)
  filled = full(out_shape, fill_value, dtype or _dtype(x))
  # Tie the result to `x` so it stays associated with `x`'s trace/staging.
  return tie_in(x, filled)
def collapse(operand, start_dimension, stop_dimension):
  """Merge dimensions [start_dimension, stop_dimension) into one dimension."""
  shape = operand.shape
  merged = prod(shape[start_dimension:stop_dimension])
  return reshape(operand,
                 shape[:start_dimension] + (merged,) + shape[stop_dimension:])
def slice_in_dim(operand, start_index, limit_index, stride=1, axis=0):
  """Convenience wrapper around slice applying to only one dimension."""
  axis = int(axis)
  # Full-extent slice in every dimension except `axis`.
  starts = [0] * operand.ndim
  limits = list(operand.shape)
  steps = [1] * operand.ndim
  starts[axis] = int(start_index)
  limits[axis] = int(limit_index)
  steps[axis] = int(stride)
  return slice(operand, starts, limits, steps)
def index_in_dim(operand, index, axis=0, keepdims=True):
  """Convenience wrapper around slice to perform int indexing."""
  index, axis = int(index), int(axis)
  axis_size = operand.shape[axis]
  # Support negative indices, numpy-style.
  wrapped = index if index >= 0 else index + axis_size
  if wrapped < 0 or wrapped >= axis_size:
    msg = 'index {} is out of bounds for axis {} with size {}'
    raise IndexError(msg.format(index, axis, axis_size))
  sliced = slice_in_dim(operand, wrapped, wrapped + 1, 1, axis)
  if not keepdims:
    return reshape(sliced, onp.delete(operand.shape, axis))
  return sliced
def dynamic_slice_in_dim(operand, start_index, slice_size, axis=0):
  """Convenience wrapper around dynamic_slice applying to one dimension."""
  start_indices = [onp.array([0], dtype=_dtype(start_index))] * operand.ndim
  slice_sizes = list(operand.shape)
  axis = int(axis)
  axis_size = _const(start_index, operand.shape[axis])
  # Reduce start_index modulo the axis size (via `rem`) so it lands in range.
  start_indices[axis] = reshape(rem(start_index, axis_size), [1])
  slice_sizes[axis] = int(slice_size)
  start_indices = concatenate(start_indices, 0)
  return dynamic_slice(operand, start_indices, slice_sizes)
def dynamic_index_in_dim(operand, index, axis=0, keepdims=True):
  """Convenience wrapper around dynamic_slice to perform int indexing."""
  result = dynamic_slice_in_dim(operand, index, 1, axis)
  if keepdims:
    return result
  else:
    # Drop the length-1 indexed axis.
    return reshape(result, onp.delete(operand.shape, axis))
def dynamic_update_slice_in_dim(operand, update, start_index, axis):
  """Convenience wrapper around dynamic_update_slice for one dimension;
  the start index wraps modulo the axis size."""
  axis = int(axis)
  start_indices = [0] * _ndim(operand)
  start_indices[axis] = start_index % operand.shape[axis]
  return dynamic_update_slice(operand, update, start_indices)
def dynamic_update_index_in_dim(operand, update, index, axis):
  """Like dynamic_update_slice_in_dim, but `update` may omit the updated axis;
  a length-1 axis is then inserted at position `axis` before updating."""
  axis = int(axis)
  if _ndim(update) != _ndim(operand):
    assert _ndim(update) + 1 == _ndim(operand)
    ax = axis % _ndim(operand)
    update = reshape(update, operand.shape[:ax] + (1,) + operand.shape[ax+1:])
  return dynamic_update_slice_in_dim(operand, update, index, axis)
def batch_matmul(lhs, rhs):
  """Batch matrix multiplication."""
  if _min(lhs.ndim, rhs.ndim) < 2:
    raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  if lhs.ndim != rhs.ndim:
    raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  # Contract the last dim of lhs against the second-to-last dim of rhs; all
  # leading dims are batch dims.
  batch_dims = tuple(range(lhs.ndim - 2))
  dimension_numbers = [((lhs.ndim - 1,), (rhs.ndim - 2,)),
                       (batch_dims, batch_dims)]
  return dot_general(lhs, rhs, dimension_numbers)
# These trig functions also exist in the XLA client library, but we treat them
# as non-primitive to maintain a smaller set of autodiff primitives.
def sqrt(x):
  r"""Elementwise square root: :math:`\sqrt{x}`."""
  return sqrt_p.bind(x)
def rsqrt(x):
  r"""Elementwise reciprocal square root: :math:`1 \over \sqrt{x}`."""
  return pow(x, _const(x, -0.5))
def square(x):
  r"""Elementwise square: :math:`x^2`."""
  return mul(x, x)
def reciprocal(x):
  r"""Elementwise reciprocal: :math:`1 \over x`."""
  return div(_const(x, 1), x)
def tan(x):
  r"""Elementwise tangent: :math:`\mathrm{tan}(x)`."""
  return div(sin(x), cos(x))
def asin(x):
  r"""Elementwise arc sine: :math:`\mathrm{asin}(x)`."""
  # Half-angle form: asin(x) = 2 * atan2(x, 1 + sqrt(1 - x**2)).
  return mul(_const(x, 2),
             atan2(x, add(_const(x, 1), sqrt(sub(_const(x, 1), square(x))))))
def acos(x):
  r"""Elementwise arc cosine: :math:`\mathrm{acos}(x)`."""
  # acos(x) = 2 * atan2(sqrt(1 - x**2), 1 + x), with acos(-1) pinned to pi
  # (the atan2 form is indeterminate at x == -1).
  return select(
      ne(x, _const(x, -1.0)),
      mul(_const(x, 2),
          atan2(sqrt(sub(_const(x, 1), square(x))), add(_const(x, 1), x))),
      full_like(x, onp.pi))
def atan(x):
  r"""Elementwise arc tangent: :math:`\mathrm{atan}(x)`."""
  return atan2(x, _const(x, 1))
def sinh(x):
  r"""Elementwise hyperbolic sine: :math:`\mathrm{sinh}(x)`."""
  log_half = _const(x, onp.log(0.5))
  # This formulation avoids overflow when e^x is inf but e^x/2 is not inf.
  return sub(exp(add(log_half, x)), exp(sub(log_half, x)))
def cosh(x):
  r"""Elementwise hyperbolic cosine: :math:`\mathrm{cosh}(x)`."""
  log_half = _const(x, onp.log(0.5))
  # This formulation avoids overflow when e^x is inf but e^x/2 is not inf.
  return add(exp(add(log_half, x)), exp(sub(log_half, x)))
def asinh(x):
  r"""Elementwise arc hyperbolic sine: :math:`\mathrm{asinh}(x)`."""
  # asinh(x) = log(x + sqrt(x**2 + 1))
  result = log(add(x, sqrt(add(mul(x, x), _const(x, 1)))))
  if onp.issubdtype(_dtype(result), onp.complexfloating):
    return result
  # For |x| near the dtype max, x**2 overflows; use sign(x)*(log|x| + log 2).
  a = abs(x)
  sqrt_max_value = onp.sqrt(onp.finfo(_dtype(x)).max)
  return select(lt(a, _const(a, sqrt_max_value)),
                result,
                mul(sign(x), add(log(a), _const(a, onp.log(2.)))))
def acosh(x):
  r"""Elementwise arc hyperbolic cosine: :math:`\mathrm{acosh}(x)`."""
  # acosh(x) = log(x + sqrt((x + 1) * (x - 1))) if x < sqrt_max_value
  # log(x) + log(2) otherwise
  sqrt_max_value = onp.sqrt(onp.finfo(_dtype(x)).max)
  result = log(add(x, mul(sqrt(add(x, _const(x, 1))),
                          sqrt(sub(x, _const(x, 1))))))
  if onp.issubdtype(_dtype(result), onp.complexfloating):
    return result
  return select(
      lt(x, _const(x, sqrt_max_value)),
      result,
      add(log(x), _const(x, onp.log(2.))))
def atanh(x):
  r"""Elementwise arc hyperbolic tangent: :math:`\mathrm{atanh}(x)`."""
  # atanh(x) = 0.5 * log((1 + x) / (1 - x))
  result = mul(_const(x, 0.5), log(div(add(_const(x, 1), x),
                                       sub(_const(x, 1), x))))
  if onp.issubdtype(_dtype(result), onp.complexfloating):
    return result
  # Real atanh is undefined outside [-1, 1]: return nan there.
  return select(le(abs(x), _one(x)), result, full_like(x, onp.nan))
# Add some methods to ShapedArray that rely on lax primitives
ShapedArray.broadcast = core.aval_method(broadcast)
ShapedArray.transpose = core.aval_method(transpose)  # clobbered by lax_numpy
ShapedArray.reshape = core.aval_method(reshape)  # clobbered by lax_numpy
def _iter(tracer):
  # Iterating an abstract array yields index_in_dim slices along axis 0,
  # with the indexed axis dropped (keepdims=False), mirroring numpy.
  if tracer.ndim == 0:
    raise TypeError("iteration over a 0-d array")  # same as numpy error
  else:
    n = tracer.shape[0]
    # NOTE(review): `xrange` is Python 2 -- presumably aliased at module top
    # for py2/py3 compatibility; confirm (NameError on py3 otherwise).
    return (index_in_dim(tracer, i, keepdims=False) for i in xrange(n))
ShapedArray._iter = staticmethod(_iter)
# Add some ad handlers that use (or could use) lax primitives
def zeros_like_array(x):
  """Zero cotangent/tangent value with `x`'s shape and dtype."""
  return full_like(x, 0)
# Register `add` as the accumulation op for every concrete array type.
for t in itertools.chain(array_types, [xla.DeviceArray]):
  ad_util.jaxval_adders[t] = add
ad_util.jaxval_zeros_likers[xla.DeviceArray] = zeros_like_array
### primitives
# Dtype rules used by primitive definitions below:
_input_dtype = lambda *args, **_: xla_bridge.canonicalize_dtype(args[0].dtype)  # result dtype follows first operand
_fixed_dtype = lambda dtype: lambda *args, **kwargs: xla_bridge.canonicalize_dtype(dtype)  # constant result dtype
_complex_basetype = lambda dtype: onp.abs(onp.zeros((), dtype)).dtype  # e.g. complex64 -> float32; identity on reals
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None):
  """Create a Primitive wired with the standard impl, abstract eval, and XLA
  translation; `translation_rule` defaults to `standard_translate(name)`."""
  prim = Primitive(name)
  prim.def_impl(partial(xla.apply_primitive, prim))
  prim.def_abstract_eval(partial(standard_abstract_eval, shape_rule, dtype_rule))
  xla.translations[prim] = translation_rule or partial(standard_translate, name)
  return prim
def standard_abstract_eval(shape_rule, dtype_rule, *args, **kwargs):
  """Abstract evaluation for standard primitives: apply `shape_rule` and
  `dtype_rule` at the least specialized abstraction level among `args`.
  Raises:
    TypeError: if the least specialized level is not a known aval class.
  """
  assert all(isinstance(arg, UnshapedArray) for arg in args), args
  least_specialized = _max(
      map(type, args), key=operator.attrgetter('array_abstraction_level'))
  # The ConcreteArray and ShapedArray branches were byte-identical duplicates;
  # merged. No constant folding happens here -- concrete inputs still only
  # propagate shape and dtype.
  if least_specialized in (ConcreteArray, ShapedArray):
    return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
  elif least_specialized is UnshapedArray:
    return UnshapedArray(dtype_rule(*args, **kwargs))
  else:
    raise TypeError(args, least_specialized)
def standard_translate(name, c, *args, **kwargs):
  """Default XLA translation: invoke the builder method on `c` whose CamelCase
  name corresponds to the primitive's snake_case `name`."""
  opname = ''.join(part.capitalize() for part in name.split('_'))
  return getattr(c, opname)(*args, **kwargs)
def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):
  """Validate `aval.dtype` against `accepted_dtypes` and compute the result
  dtype via `result_dtype`; raises TypeError for unsupported dtypes."""
  for t in accepted_dtypes:
    if onp.issubdtype(aval.dtype, t):
      return result_dtype(aval.dtype)
  msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'
  typename = str(onp.dtype(aval.dtype).name)
  accepted_typenames = (str(onp.dtype(t).name) for t in accepted_dtypes)
  raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))
def unop(result_dtype, accepted_dtypes, name):
  """Define a unary elementwise primitive with standard rules: shape is the
  input's shape, dtype comes from `result_dtype`, batching is vectorized."""
  dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
  prim = standard_primitive(_attrgetter('shape'), dtype_rule, name)
  batching.defvectorized(prim)
  return prim
standard_unop = partial(unop, _identity)  # unop whose result dtype equals its input dtype
# Defined after `unop` but only looked up at call time, so ordering is fine.
_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)
def binop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):
  """Check each operand dtype against its per-position accepted set, require
  matching dtypes across operands, and compute the result dtype."""
  aval_dtypes = [aval.dtype for aval in avals]
  for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):
    if not any(onp.issubdtype(aval_dtype, t) for t in types):
      msg = ('{} does not accept dtype {} at position {}. '
             'Accepted dtypes at position {} are subtypes of {}.')
      typename = str(onp.dtype(aval_dtype).name)
      typenames = ', '.join(str(onp.dtype(t).name) for t in types)
      raise TypeError(msg.format(name, typename, i, i, typenames))
  _check_same_dtypes(name, False, *aval_dtypes)
  return result_dtype(*avals)
def _broadcasting_shape_rule(name, *avals):
shapes = onp.array([aval.shape for aval in avals if aval.shape])
if not shapes.size:
return ()
if len({len(shape) for shape in shapes}) != 1:
msg = '{} got arrays of different rank: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
min_shape = onp.min(shapes, axis=0)
max_shape = onp.max(shapes, axis=0)
result_shape = onp.where(min_shape == 0, 0, max_shape)
if not onp.all((shapes == result_shape) | (shapes == 1)):
msg = '{} got incompatible shapes for broadcasting: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
return tuple(result_shape)
def binop(result_dtype, accepted_dtypes, name, translation_rule=None):
  """Define a binary broadcasting primitive with standard shape/dtype rules
  and broadcasting batching behavior."""
  dtype_rule = partial(binop_dtype_rule, result_dtype, accepted_dtypes, name)
  shape_rule = partial(_broadcasting_shape_rule, name)
  prim = standard_primitive(shape_rule, dtype_rule, name,
                            translation_rule=translation_rule)
  batching.defbroadcasting(prim)
  return prim
standard_binop = partial(binop, _input_dtype)  # binop whose result dtype follows the first operand
# NOTE(mattjj): this isn't great for orchestrate fwd mode because it means JVPs
# get two extra ops in them: a reshape and a broadcast_in_dim (or sometimes just
# a broadcast). but saving the shape info with the primitives isn't great either
# because then we can't trace these ops without shape data.
def _brcast(x, *others):
  # Used in jvprules to make binop broadcasting explicit for transposability.
  # Requires shape info during jvp tracing, which isn't strictly necessary.
  # We don't need full numpy broadcasting, but otherwise the logic is the same
  # so we reuse the broadcast_shapes function after filtering out scalars.
  shapes = tuple(filter(None, map(onp.shape, (x,) + others)))
  # `shapes` empty (all scalars) leaves `shape` as the empty tuple, so the
  # comparison below returns `x` unchanged.
  shape = shapes and broadcast_shapes(*shapes)
  if onp.shape(x) != shape:
    return _brcast_to(x, shape)
  else:
    return x
def _brcast_to(x, shape):
  """Broadcast non-matching `x` up to `shape`: same-rank inputs have their
  size-mismatched dims squeezed out via reshape, then reintroduced by
  broadcast_in_dim; scalars take the plain broadcast path."""
  x_shape = onp.shape(x)
  assert x_shape != shape
  if x_shape:
    assert len(x_shape) == len(shape)
    broadcast_dimensions, = onp.where(onp.equal(x_shape, shape))
    squeezed_dimensions, = onp.where(onp.not_equal(x_shape, shape))
    inshape = onp.delete(x_shape, squeezed_dimensions)
    return broadcast_in_dim(reshape(x, inshape), shape, broadcast_dimensions)
  else:
    return broadcast(x, shape)
# Dtype-acceptance sets used in the primitive definitions below.
_float = {onp.floating}
_complex = {onp.complexfloating}
_complex_elem_types = {onp.float32, onp.float64}  # real element types of complex64/128
_int = {onp.integer}
_bool = {onp.bool_}
_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
# --- Unary elementwise primitives and their differentiation rules. ---
neg_p = standard_unop(_num, 'neg')
ad.deflinear(neg_p, lambda t: [neg(t)])
batching.defvectorized(neg_p)
# Rounding-style ops are piecewise constant, so their JVP is zero.
sign_p = standard_unop(_num, 'sign')
ad.defjvp_zero(sign_p)
floor_p = standard_unop(_float, 'floor')
ad.defjvp_zero(floor_p)
ceil_p = standard_unop(_float, 'ceil')
ad.defjvp_zero(ceil_p)
round_p = standard_unop(_float, 'round')
ad.defjvp_zero(round_p)
is_finite_p = unop(_fixed_dtype(onp.bool_), _float, 'is_finite')
ad.defjvp_zero(is_finite_p)
exp_p = standard_unop(_float | _complex, 'exp')
# d/dx exp(x) = exp(x); _safe_mul keeps 0 * inf == 0.
ad.defjvp2(exp_p, lambda g, ans, x: _safe_mul(g, ans))
log_p = standard_unop(_float | _complex, 'log')
ad.defjvp(log_p, lambda g, x: div(g, x))
expm1_p = standard_unop(_float | _complex, 'expm1')
ad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, _one(ans))))
log1p_p = standard_unop(_float | _complex, 'log1p')
ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))
tanh_p = standard_unop(_float | _complex, 'tanh')
ad.defjvp2(tanh_p, lambda g, ans, x: mul(g, sub(_one(x), mul(ans, ans))))
sin_p = standard_unop(_float | _complex, 'sin')
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
cos_p = standard_unop(_float | _complex, 'cos')
ad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))
atan2_p = standard_binop([_float, _float], 'atan2')
ad.defjvp(atan2_p,
          lambda g, x, y: _brcast(g, y) * (y / (square(x) + square(y))),
          lambda g, x, y: _brcast(g, x) * -x / (square(x) + square(y)))
lgamma_p = standard_unop(_float, 'lgamma')
ad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))
digamma_p = standard_unop(_float, 'digamma')  # no JVP registered here
erf_p = standard_unop(_float, 'erf')
ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
                                  mul(g, exp(neg(square(x))))))
erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
                                   mul(neg(g), exp(neg(square(x))))))
erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, onp.sqrt(onp.pi) / 2.),
                                            mul(g, exp(square(ans)))))
# --- Complex-number primitives. ---
real_p = unop(_complex_basetype, _complex, 'real')
ad.deflinear(real_p, lambda t: [complex(t, onp.zeros((), _dtype(t)))])
imag_p = unop(_complex_basetype, _complex, 'imag')
ad.defjvp(imag_p, lambda g, _: real(mul(_const(g, -1j), g)))
_complex_dtype = lambda dtype, *args: (onp.zeros((), dtype) + onp.zeros((), onp.complex64)).dtype
complex_p = binop(_complex_dtype, [_complex_elem_types, _complex_elem_types],
                  'complex')
ad.deflinear(complex_p, lambda t: [real(t), imag(neg(t))])
conj_p = unop(_complex_dtype, _float | _complex, 'conj')
def _conj_transpose_rule(t, x, input_dtype):
  """Transpose of conj depends on whether the input was real or complex."""
  assert x is None
  if onp.issubdtype(input_dtype, onp.complexfloating):
    return [conj(t)]
  else:
    return [real(t)]
xla.translations[conj_p] = lambda c, x, **kwargs: c.Conj(x)
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule
abs_p = unop(_complex_basetype, _num, 'abs')
ad.defjvp2(abs_p,
           lambda g, ans, x:
           div(_maybe_real(mul(g, _maybe_conj(x))), _replace_zero(ans)))
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x
sqrt_p = standard_unop(_float | _complex, 'sqrt')
# d/dx sqrt(x) = 0.5 / sqrt(x); _safe_mul keeps zero tangents at zero.
ad.defjvp2(sqrt_p, lambda g, ans, x: _safe_mul(g, div(_const(x, 0.5), ans)))
pow_p = standard_binop([_float | _complex, _float | _complex], 'pow')
def _pow_jvp_lhs(g, x, y):
  # we call _safe_mul here so that we get the behavior 0*inf = 0, since when a
  # coefficient in `g` is zero we want to keep it at zero, not produce a nan.
  # see https://github.com/google/jax/pull/383
  # The select guards y == 0: use exponent 1 there so pow stays finite.
  jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))
  return _safe_mul(_brcast(g, y), jac)
def _pow_jvp_rhs(g, x, y):
  # d/dy x**y = log(x) * x**y; _replace_zero avoids log(0).
  return mul(_brcast(g, x), mul(log(_replace_zero(x)), pow(x, y)))
ad.defjvp(pow_p, _pow_jvp_lhs, _pow_jvp_rhs)
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)  # swap zeros for ones
# --- Bitwise/boolean primitives (zero derivative). ---
not_p = standard_unop(_int | _bool, 'not')
and_p = standard_binop([_any, _any], 'and')
ad.defjvp_zero(and_p)
or_p = standard_binop([_any, _any], 'or')
ad.defjvp_zero(or_p)
xor_p = standard_binop([_any, _any], 'xor')
ad.defjvp_zero(xor_p)
# --- Arithmetic primitives. ---
def _add_transpose(t, x, y):
  # assert x is None and y is None  # computation must be linear, not affine
  return [t, t]
add_p = standard_binop([_num, _num], 'add')
ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))
ad.primitive_transposes[add_p] = _add_transpose
def _sub_transpose(t, x, y):
  assert x is None and y is None  # computation must be linear, not affine
  return [t, neg(t) if t is not ad_util.zero else ad_util.zero]
sub_p = standard_binop([_num, _num], 'sub')
ad.defjvp(sub_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: _brcast(neg(g), x))
ad.primitive_transposes[sub_p] = _sub_transpose
mul_p = standard_binop([_num, _num], 'mul')
ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)
def _safe_mul_translation_rule(c, x, y):
  """XLA lowering for safe_mul: yields 0 wherever either operand is 0
  (so 0 * inf == 0 instead of nan)."""
  dtype = c.GetShape(x).numpy_dtype()
  zero = c.Constant(onp.array(0, dtype=dtype))
  out_shape = broadcast_shapes(c.GetShape(x).dimensions(),
                               c.GetShape(y).dimensions())
  return c.Select(c.Or(c.Eq(x, zero), c.Eq(y, zero)),
                  c.Broadcast(zero, out_shape),
                  c.Mul(x, y))
safe_mul_p = standard_binop([_num, _num], 'safe_mul',
                            translation_rule=_safe_mul_translation_rule)
ad.defbilinear_broadcasting(_brcast, safe_mul_p, _safe_mul, _safe_mul)
def _div_transpose_rule(cotangent, x, y):
  # div is linear in x only; y must be a known (non-linear) operand.
  assert x is None and y is not None
  res = ad_util.zero if cotangent is ad_util.zero else div(cotangent, y)
  return res, None
div_p = standard_binop([_num, _num], 'div')
ad.defjvp(div_p,
          lambda g, x, y: div(_brcast(g, y), y),
          lambda g, x, y: div(mul(neg(_brcast(g, x)), x), pow(y, _two(y))))
ad.primitive_transposes[div_p] = _div_transpose_rule
rem_p = standard_binop([_num, _num], 'rem')
ad.defjvp(rem_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: mul(neg(g), floor(div(x, y))))
def _broadcasting_select(c, which, x, y):
  """Wrapper around XLA `Select` that broadcasts its arguments."""
  which_shape, x_shape, y_shape = (
      c.GetShape(t).dimensions() for t in (which, x, y))
  out_shape = broadcast_shapes(which_shape, x_shape, y_shape)
  # Right-align each operand's dims against the broadcast output shape.
  bcast_dims = lambda shape: tuple(range(len(out_shape) - len(shape),
                                         len(out_shape)))
  which = c.BroadcastInDim(which, out_shape, bcast_dims(which_shape))
  x = c.BroadcastInDim(x, out_shape, bcast_dims(x_shape))
  y = c.BroadcastInDim(y, out_shape, bcast_dims(y_shape))
  return c.Select(which, x, y)
def _minmax_translation_rule(c, x, y, minmax=None, cmp=None):
  """XLA lowering for max/min. Complex inputs are ordered lexicographically:
  compare real parts first, imaginary parts on ties."""
  dtype = c.GetShape(x).numpy_dtype()
  if onp.issubdtype(dtype, onp.complexfloating):
    comparator = cmp(c)
    rx = c.Real(x)
    ry = c.Real(y)
    return _broadcasting_select(
        c, c.Select(c.Eq(rx, ry), comparator(c.Imag(x), c.Imag(y)),
                    comparator(rx, ry)),
        x, y)
  return minmax(c)(x, y)
max_p = standard_binop([_any, _any], 'max', translation_rule=partial(
    _minmax_translation_rule, minmax=lambda c: c.Max, cmp=lambda c: c.Gt))
# JVP routes the tangent through whichever operand equals the answer
# (split evenly on ties via _balanced_eq).
ad.defjvp2(max_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
min_p = standard_binop([_any, _any], 'min', translation_rule=partial(
    _minmax_translation_rule, minmax=lambda c: c.Min, cmp=lambda c: c.Lt))
ad.defjvp2(min_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
# Integer shifts: piecewise constant, so JVP is zero.
shift_left_p = standard_binop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)
shift_right_arithmetic_p = standard_binop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)
shift_right_logical_p = standard_binop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)
# Comparisons: bool result dtype, zero derivative.
eq_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)
ne_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)
ge_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)
gt_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)
le_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)
lt_p = binop(_fixed_dtype(onp.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)
def _convert_element_type_shape_rule(operand, new_dtype, old_dtype):
  # Dtype conversion preserves shape.
  return operand.shape
def _convert_element_type_dtype_rule(operand, new_dtype, old_dtype):
  return new_dtype
def _convert_element_type_translation_rule(c, operand, new_dtype, old_dtype):
  # NOTE(review): this uses `xla_client.dtype_to_etype` while the bitcast rule
  # below uses `xla_bridge.dtype_to_etype` -- confirm whether the asymmetry is
  # intentional.
  new_etype = xla_client.dtype_to_etype(new_dtype)
  return c.ConvertElementType(operand, new_element_type=new_etype)
convert_element_type_p = standard_primitive(
    _convert_element_type_shape_rule, _convert_element_type_dtype_rule,
    'convert_element_type', _convert_element_type_translation_rule)
# Linear op: transpose converts the cotangent back to the old dtype.
ad.deflinear(
    convert_element_type_p,
    lambda t, new_dtype, old_dtype: [convert_element_type(t, old_dtype)])
batching.defvectorized(convert_element_type_p)
def _bitcast_convert_type_shape_rule(operand, new_dtype):
  return operand.shape
def _bitcast_convert_type_dtype_rule(operand, new_dtype):
  return new_dtype
def _bitcast_convert_type_translation_rule(c, operand, new_dtype):
  new_etype = xla_bridge.dtype_to_etype(new_dtype)
  return c.BitcastConvertType(operand, new_element_type=new_etype)
bitcast_convert_type_p = standard_primitive(
    _bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule,
    'bitcast_convert_type', _bitcast_convert_type_translation_rule)
ad.defjvp_zero(bitcast_convert_type_p)
batching.defvectorized(bitcast_convert_type_p)
def _conv_general_dilated_shape_rule(
    lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count, **unused_kwargs):
  """Shape rule for conv_general_dilated.

  Validates feature_group_count against the lhs/rhs feature dimensions, then
  computes the output shape by permuting to the layout in dimension_numbers,
  dilating both operands, applying the standard conv shape formula, and
  permuting back.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  if not feature_group_count > 0:
    msg = ("conv_general_dilated feature_group_count "
           "must be a positive integer, got {}.")
    raise ValueError(msg.format(feature_group_count))
  # lhs_spec[1] / rhs_spec[1] index the input-feature dimensions;
  # rhs_spec[0] indexes the output-feature dimension.
  lhs_feature_count = lhs.shape[dimension_numbers.lhs_spec[1]]
  quot, rem = divmod(lhs_feature_count, feature_group_count)
  if rem:
    msg = ("conv_general_dilated feature_group_count must divide lhs feature "
           "dimension size, but {} does not divide {}.")
    raise ValueError(msg.format(feature_group_count, lhs_feature_count))
  if quot != rhs.shape[dimension_numbers.rhs_spec[1]]:
    msg = ("conv_general_dilated lhs feature dimension size divided by "
           "feature_group_count must equal the rhs input feature dimension "
           "size, but {} // {} != {}.")
    raise ValueError(msg.format(lhs_feature_count, feature_group_count,
                                rhs.shape[dimension_numbers.rhs_spec[1]]))
  if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:
    msg = ("conv_general_dilated rhs output feature dimension size must be a "
           "multiple of feature_group_count, but {} is not a multiple of {}.")
    raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
                                feature_group_count))
  # Compute the output shape in the permuted (spec) layout, then undo the
  # output permutation with argsort.
  lhs_perm, rhs_perm, out_perm = dimension_numbers
  lhs_trans = _dilate_shape(onp.take(lhs.shape, lhs_perm), lhs_dilation)
  rhs_trans = _dilate_shape(onp.take(rhs.shape, rhs_perm), rhs_dilation)
  out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding)
  return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def _conv_general_dilated_dtype_rule(
    lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, **unused_kwargs):
  # Convolution requires floating-point operands; the result keeps the
  # (identical) input dtype.
  return binop_dtype_rule(_input_dtype, [_float, _float],
                          'conv_general_dilated', lhs, rhs)
_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
_conv_sdims = lambda spec: spec[2:]
def _conv_general_dilated_transpose_lhs(
    g, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count,
    lhs_shape, rhs_shape, precision):
  """Transpose rule for conv_general_dilated w.r.t. the lhs (inputs).

  The cotangent is expressed as another conv: the output cotangent `g`
  convolved with the spatially reversed kernel, with window strides and lhs
  dilation swapped and padding recomputed by _conv_general_vjp_lhs_padding.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  t_rhs_spec = _conv_spec_transpose(rhs_spec)
  if feature_group_count > 1:
    # in addition to switching the dims in the spec, need to move the feature
    # group axis into the transposed rhs's output feature dim
    rhs = _reshape_axis_out_of(rhs_spec[0], feature_group_count, rhs)
    rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
  # The transposed conv reads g in the forward output layout and produces a
  # result in the forward lhs layout.
  trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec)
  padding = _conv_general_vjp_lhs_padding(
      onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
      window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  revd_weights = rev(rhs, rhs_sdims)
  return conv_general_dilated(
      g, revd_weights, window_strides=lhs_dilation, padding=padding,
      lhs_dilation=window_strides, rhs_dilation=rhs_dilation,
      dimension_numbers=trans_dimension_numbers,
      feature_group_count=feature_group_count, precision=precision)
def _conv_general_dilated_transpose_rhs(
    g, lhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count,
    lhs_shape, rhs_shape, precision):
  """Transpose rule for conv_general_dilated w.r.t. the rhs (kernel).

  The kernel cotangent is a conv of the forward lhs with the output
  cotangent `g`, with rhs dilation and window strides swapped and padding
  recomputed by _conv_general_vjp_rhs_padding.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_trans, rhs_trans, out_trans = map(_conv_spec_transpose, dimension_numbers)
  if feature_group_count > 1:
    # Fold the feature-group axis out of the lhs feature dim and into the
    # adjacent dim so groups line up for the transposed conv.
    lhs = _reshape_axis_out_of(lhs_trans[0], feature_group_count, lhs)
    lhs = _reshape_axis_into(lhs_trans[0], lhs_trans[1], lhs)
  trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)
  padding = _conv_general_vjp_rhs_padding(
      onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
      window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  return conv_general_dilated(
      lhs, g, window_strides=rhs_dilation, padding=padding,
      lhs_dilation=lhs_dilation, rhs_dilation=window_strides,
      dimension_numbers=trans_dimension_numbers,
      feature_group_count=feature_group_count, precision=precision)
def _conv_general_dilated_translation_rule(
    c, lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count, precision, **unused_kwargs):
  """Lower conv_general_dilated to XLA's ConvGeneralDilated op."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  # XLA expects its own dimension-numbers proto, not our namedtuple.
  dimension_numbers = _conv_general_proto(dimension_numbers)
  return c.ConvGeneralDilated(lhs, rhs, window_strides, padding, lhs_dilation,
                              rhs_dilation, dimension_numbers,
                              feature_group_count,
                              precision_config=_precision_config(precision))
def _conv_general_dilated_batch_rule(
    batched_args, batch_dims, window_strides, padding,
    lhs_dilation, rhs_dilation, dimension_numbers,
    feature_group_count, precision, **unused_kwargs):
  """Batching rule for conv_general_dilated.

  The batch dimension is folded into an existing conv dimension (feature or
  batch of the operands) via reshapes so a single unbatched conv can be
  issued, then the result is reshaped to split the batch dimension back out.
  """
  lhs, rhs = batched_args
  lhs_bdim, rhs_bdim = batch_dims
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  if lhs_bdim is not None and rhs_bdim is not None:
    assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim]
    # Fold the batch into the feature dims of both operands and treat each
    # batch element as its own feature group.
    new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[1], lhs)
    new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
    out = conv_general_dilated(
        new_lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation,
        dimension_numbers,
        feature_group_count=lhs.shape[lhs_bdim] * feature_group_count,
        precision=precision)
    out = _reshape_axis_out_of(out_spec[1], lhs.shape[lhs_bdim], out)
    return out, out_spec[1]
  elif lhs_bdim is not None:
    # Fold the batch into the lhs batch dim; the output batch dim then holds
    # both and is split apart afterwards.
    new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
    out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
                               lhs_dilation, rhs_dilation, dimension_numbers,
                               feature_group_count, precision=precision)
    out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
    return out, out_spec[0]
  elif rhs_bdim is not None:
    if feature_group_count == 1:
      new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
      out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, precision=precision)
      out = _reshape_axis_out_of(out_spec[1], rhs.shape[rhs_bdim], out)
      return out, out_spec[1]
    else:
      # feature_group needs to be outermost, so we need to factor it out of the
      # rhs output feature dim, then factor the batch dim into the remaining rhs
      # output feature dim, then put feature_group back in. we do something
      # similar on the output. an alternative which would require more FLOPs but
      # fewer reshapes would be to broadcast lhs.
      new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]),
                                     feature_group_count, rhs)
      new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim),
                                   rhs_spec[0] + 1,
                                   new_rhs)
      new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs)
      out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, precision=precision)
      out = _reshape_axis_out_of(out_spec[1], feature_group_count, out)
      out = _reshape_axis_out_of(out_spec[1] + 1, rhs.shape[rhs_bdim], out)
      out = _reshape_axis_into(out_spec[1], out_spec[1] + 1, out)
      return out, out_spec[1]
# Register conv_general_dilated: shape/dtype/translation rules, bilinear AD
# (a separate transpose rule per operand), and the batching rule above.
conv_general_dilated_p = standard_primitive(
    _conv_general_dilated_shape_rule, _conv_general_dilated_dtype_rule,
    'conv_general_dilated', _conv_general_dilated_translation_rule)
ad.defbilinear(conv_general_dilated_p,
               _conv_general_dilated_transpose_lhs,
               _conv_general_dilated_transpose_rhs)
batching.primitive_batchers[conv_general_dilated_p] = \
    _conv_general_dilated_batch_rule
def _reshape_axis_into(src, dst, x):
  """Collapse axis `src` of `x` into axis `dst` via a transposing reshape.

  The result has one fewer dimension; the size at position `dst` (counted in
  the shape with `src` removed) is multiplied by the size of `src`.
  """
  perm = [axis for axis in range(x.ndim) if axis != src]
  perm.insert(dst, src)
  merged_shape = list(onp.delete(x.shape, src))
  merged_shape[dst] *= x.shape[src]
  return reshape(x, merged_shape, perm)
def _reshape_axis_out_of(src, size1, x):
  """Split axis `src` of `x` into two axes of sizes (size1, shape[src] // size1).

  Asserts that `size1` evenly divides the original axis size.
  """
  split_shape = list(x.shape)
  size2, ragged = divmod(split_shape[src], size1)
  assert not ragged
  split_shape[src:src + 1] = [size1, size2]
  return reshape(x, split_shape)
def _dot_shape_rule(lhs, rhs, precision):
if lhs.ndim == 0 or rhs.ndim == 0:
msg = "Dot only supports rank 1 or above, got shapes {} and {}."
raise TypeError(msg.format(lhs.shape, rhs.shape))
if lhs.ndim > 2 or rhs.ndim > 2:
msg = "Dot only supports rank 2 or less, got shapes {} and {}."
raise TypeError(msg.format(lhs.shape, rhs.shape))
def require(shape_cond):
if not shape_cond:
msg = "Incompatible shapes for dot: got {} and {}."
raise TypeError(msg.format(lhs.shape, rhs.shape))
if lhs.ndim == rhs.ndim == 1:
require(lhs.shape == rhs.shape)
return ()
elif lhs.ndim == rhs.ndim == 2:
require(lhs.shape[1] == rhs.shape[0])
return (lhs.shape[0], rhs.shape[1])
elif rhs.ndim == 1:
require(lhs.shape[-1] == rhs.shape[0])
return lhs.shape[:-1]
else:
require(lhs.shape[-1] == rhs.shape[-2])
return lhs.shape[:-1] + rhs.shape[:-2] + rhs.shape[-1:]
def _dot_transpose_lhs(t, rhs, precision):
  """Transpose rule for `dot` w.r.t. its lhs: combine cotangent `t` with rhs.

  Branches on the ranks of t and rhs to recover the lhs cotangent shape.
  """
  if onp.ndim(t) == onp.ndim(rhs) == 2:
    return dot(t, transpose(rhs, (1, 0)), precision=precision)
  elif onp.ndim(t) == 1 and onp.ndim(rhs) == 2:
    # Forward was (k,) @ (k, n) -> (n,); cotangent of lhs is rhs @ t.
    return dot(rhs, t, precision=precision)
  elif onp.ndim(t) == onp.ndim(rhs) == 1:
    return _outer(t, rhs)
  elif onp.ndim(t) == 0 or onp.ndim(rhs) == 0:
    return mul(t, rhs)
  else:
    raise TypeError
def _dot_transpose_rhs(t, lhs, precision):
  """Transpose rule for `dot` w.r.t. its rhs: combine cotangent `t` with lhs.

  Mirrors _dot_transpose_lhs. Fix: the matrix-matrix branch previously
  dropped `precision`, so the backward matmul could run at a lower precision
  than the forward one; thread it through like every other branch.
  """
  if onp.ndim(lhs) == onp.ndim(t) == 2:
    return dot(transpose(lhs, (1, 0)), t, precision=precision)
  elif onp.ndim(lhs) == 2 and onp.ndim(t) == 1:
    # Forward was (m, k) @ (k,) -> (m,); cotangent of rhs is t @ lhs.
    return dot(t, lhs, precision=precision)
  elif onp.ndim(t) == onp.ndim(lhs) == 1:
    return _outer(lhs, t)
  elif onp.ndim(t) == 0 or onp.ndim(lhs) == 0:
    return mul(t, lhs)
  else:
    raise TypeError
def _outer(x, y):
  """Outer product of two rank-1 values: reshape to column/row and multiply."""
  assert onp.ndim(x) == onp.ndim(y) == 1
  col = reshape(x, (x.shape[0], 1))
  row = reshape(y, (1, y.shape[0]))
  return mul(col, row)
def _dot_batch_rule(batched_args, batch_dims, precision=None):
  """Batching rule for `dot`.

  When both operands are at most rank 2 the batched product can still be a
  plain dot (or a simple batched dot_general); otherwise both operands are
  brought to a leading batch dim and a dot_general is issued.
  """
  lhs, rhs = batched_args
  lbd, rbd = batch_dims
  T = lambda x: transpose(x, onp.arange(onp.ndim(x))[::-1])
  # in some cases, we can call dot instead of dot_general
  if max(onp.ndim(lhs), onp.ndim(rhs)) <= 2:
    if rbd is None:
      assert lbd in (0, 1)
      if lbd == 0:
        return dot(lhs, rhs, precision=precision), 0
      else:
        return dot(T(rhs), lhs, precision=precision), onp.ndim(rhs) - 1
    if lbd is None:
      assert rbd in (0, 1)
      if rbd == onp.ndim(rhs) - 1:
        return dot(lhs, rhs, precision=precision), onp.ndim(lhs) - 1
      else:
        return dot(rhs, T(lhs), precision=precision), 0
    assert lbd is not None and rbd is not None
    assert lhs.ndim == rhs.ndim == 2  # dot only supports rank 1 and above
    lhs = batching.move_dim_to_front(lhs, lbd)
    rhs = batching.move_dim_to_front(rhs, rbd)
    # Batch dim 0 on both sides, contracting the remaining dims pairwise.
    out = dot_general(lhs, rhs, [((1,), (1,)), ((0,), (0,))],
                      precision=precision)
    return out, 0
  # General case: give each operand a leading batch dim (broadcasting the
  # unbatched one), then contract lhs's last dim with rhs's second-to-last.
  if lbd is None:
    assert rbd is not None
    lhs = broadcast(lhs, (rhs.shape[rbd],))
  else:
    lhs = batching.move_dim_to_front(lhs, lbd)
  lhs_batch = (0,)
  lhs_contracting = (onp.ndim(lhs) - 1,)
  if rbd is None:
    assert lbd is not None
    rhs = broadcast(rhs, (lhs.shape[0],))
  else:
    rhs = batching.move_dim_to_front(rhs, rbd)
  rhs_batch = (0,)
  rhs_contracting = (onp.arange(1, onp.ndim(rhs))[-2:][0],)
  dim_nums = [(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch)]
  return dot_general(lhs, rhs, dim_nums, precision=precision), 0
def _precision_config(precision):
  """Build an XLA PrecisionConfig applying `precision` to both operands.

  Returns None when no precision was requested, which lets XLA use its
  default.
  """
  if precision is None:
    return None
  config = xla_client.PrecisionConfig()
  config.operand_precision.extend((precision, precision))
  return config
def _dot_translation_rule(c, lhs, rhs, precision):
  """Lower `dot` to XLA's Dot op with an optional precision config."""
  return c.Dot(lhs, rhs, precision_config=_precision_config(precision))
# dot accepts any numeric dtype; the output dtype matches the inputs.
_dot_dtype_rule = partial(binop_dtype_rule, _input_dtype, [_num, _num], 'dot')
dot_p = standard_primitive(_dot_shape_rule, _dot_dtype_rule, 'dot',
                           _dot_translation_rule)
# dot is bilinear: a separate transpose rule per operand.
ad.defbilinear(dot_p, _dot_transpose_lhs, _dot_transpose_rhs)
batching.primitive_batchers[dot_p] = _dot_batch_rule
def _dot_general_shape_rule(lhs, rhs, dimension_numbers, precision):
  """Shape rule for dot_general.

  dimension_numbers is ((lhs_contracting, rhs_contracting),
  (lhs_batch, rhs_batch)). After validation, the result shape is the batch
  dims followed by the non-contracted, non-batch ("tensored") dims of lhs
  and then of rhs.
  """
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
  if len(lhs_batch) != len(rhs_batch):
    msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch "
           "dimensions, got lhs_batch {} and rhs_batch {}.")
    raise TypeError(msg.format(lhs_batch, rhs_batch))
  if not onp.all(onp.equal(lhs_batch, rhs_batch)):
    msg = ("dot_general requires same lhs and rhs batch dimension numbers, "
           "got {} and {}.")
    raise TypeError(msg.format(lhs_batch, rhs_batch))
  lhs_batch_shape = onp.take(lhs.shape, lhs_batch)
  rhs_batch_shape = onp.take(rhs.shape, rhs_batch)
  if not onp.all(onp.equal(lhs_batch_shape, rhs_batch_shape)):
    msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions "
           "to have the same shape, got {} and {}.")
    raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
  # Batch dims must be the leading dims of each operand.
  if tuple(sorted(lhs_batch)) != tuple(range(len(lhs_batch))):
    msg = ("dot_general requires lhs batch dimensions to precede contracting "
           "and non-contracting dimensions, got lhs_batch {}.")
    raise TypeError(msg.format(lhs_batch))
  if tuple(sorted(rhs_batch)) != tuple(range(len(rhs_batch))):
    msg = ("dot_general requires rhs batch dimensions to precede contracting "
           "and non-contracting dimensions, got rhs_batch {}.")
    raise TypeError(msg.format(rhs_batch))
  lhs_contracting_shape = onp.take(lhs.shape, lhs_contracting)
  rhs_contracting_shape = onp.take(rhs.shape, rhs_contracting)
  if not onp.all(onp.equal(lhs_contracting_shape, rhs_contracting_shape)):
    msg = ("dot_general requires contracting dimensions to have the same "
           "shape, got {} and {}.")
    raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
  batch_shape = tuple(onp.take(lhs.shape, lhs_batch))
  lhs_contract_or_batch = tuple(lhs_contracting) + tuple(lhs_batch)
  lhs_tensored_shape = tuple(onp.delete(lhs.shape, lhs_contract_or_batch))
  rhs_contract_or_batch = tuple(rhs_contracting) + tuple(rhs_batch)
  rhs_tensored_shape = tuple(onp.delete(rhs.shape, rhs_contract_or_batch))
  return batch_shape + lhs_tensored_shape + rhs_tensored_shape
def _dot_general_dtype_rule(lhs, rhs, dimension_numbers, precision):
  # Any numeric dtype is allowed; the output dtype matches the inputs.
  return binop_dtype_rule(_input_dtype, [_num, _num], 'dot_general', lhs, rhs)
def _dot_general_transpose_lhs(g, y, dimension_numbers, precision,
                               swap_ans=False):
  """Transpose rule for dot_general w.r.t. the lhs.

  Contracts the output cotangent `g` with `y` over y's kept dims and the
  shared batch dims, then permutes axes back to the lhs layout. With
  swap_ans=True the roles in `g`'s layout are swapped so the same code
  serves the rhs rule.

  Fix: `precision` was previously dropped from the cotangent dot_general
  call, so the backward contraction could run at a lower precision than the
  forward one; thread it through.
  """
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  # Recover the lhs rank from g's rank: contracted dims appear in neither g
  # nor the lhs-kept dims, batch dims appear once.
  x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)
  x_kept = remaining(range(x_ndim), x_contract, x_batch)
  y_kept = remaining(range(y.ndim), y_contract, y_batch)
  if swap_ans:
    ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)
  else:
    ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)
  dims = ((ans_y, y_kept), (ans_batch, y_batch))
  x_contract_sorted_by_y = list(onp.take(x_contract, onp.argsort(y_contract)))
  out_axes = onp.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
  return transpose(dot_general(g, y, dims, precision=precision),
                   tuple(out_axes))
def _dot_general_transpose_rhs(g, x, dimension_numbers, precision):
  """Transpose rule for dot_general w.r.t. the rhs.

  Swaps the lhs/rhs roles in dimension_numbers and reuses the lhs rule with
  swap_ans=True.
  """
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  swapped_dimension_numbers = ((y_contract, x_contract), (y_batch, x_batch))
  return _dot_general_transpose_lhs(g, x, swapped_dimension_numbers,
                                    precision, swap_ans=True)
def _dot_general_batch_rule(batched_args, batch_dims, dimension_numbers,
                            precision):
  """Batching rule for dot_general.

  Moves (or broadcasts) each operand's batch dim to the front, shifts all
  contracting/batch dimension indices right by one, and prepends dim 0 as a
  new shared batch dimension.
  """
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  lhs, rhs = batched_args
  lbd, rbd = batch_dims
  assert lbd is not None or rbd is not None
  if lbd is not None:
    if lbd != 0:
      lhs = batching.move_dim_to_front(lhs, lbd)
      lbd = 0
  else:
    assert rbd is not None
    # Unbatched lhs: broadcast it along the rhs's batch size.
    lhs = broadcast(lhs, (rhs.shape[rbd],))
  lhs_contract = tuple(onp.add(1, lhs_contract))
  lhs_batch = (0,) + tuple(onp.add(1, lhs_batch))
  if rbd is not None:
    if rbd != 0:
      rhs = batching.move_dim_to_front(rhs, rbd)
      rbd = 0
  else:
    assert lbd is not None
    rhs = broadcast(rhs, (lhs.shape[lbd],))
  rhs_contract = tuple(onp.add(1, rhs_contract))
  rhs_batch = (0,) + tuple(onp.add(1, rhs_batch))
  new_dimension_numbers = [(lhs_contract, rhs_contract), (lhs_batch, rhs_batch)]
  batched_out = dot_general(lhs, rhs, new_dimension_numbers,
                            precision=precision)
  return batched_out, 0
def _dot_general_translation_rule(c, lhs, rhs, dimension_numbers, precision):
  """Lower dot_general to XLA's DotGeneral op."""
  return c.DotGeneral(lhs, rhs, dimension_numbers,
                      precision_config=_precision_config(precision))
dot_general_p = standard_primitive(_dot_general_shape_rule,
                                   _dot_general_dtype_rule, 'dot_general',
                                   _dot_general_translation_rule)
# dot_general is bilinear in (lhs, rhs).
ad.defbilinear(dot_general_p,
               _dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
def _broadcast_shape_rule(operand, sizes):
  # The result prepends the new `sizes` dimensions to the operand's shape.
  _check_shapelike('broadcast', 'sizes', sizes)
  return tuple(sizes) + operand.shape
def _broadcast_batch_rule(batched_args, batch_dims, sizes):
  operand, = batched_args
  bdim, = batch_dims
  # Broadcasting inserts len(sizes) new leading dims, shifting the batch dim.
  new_bdim = None if bdim is None else bdim + len(sizes)
  return broadcast(operand, sizes), new_bdim
broadcast_p = standard_primitive(
    _broadcast_shape_rule, _input_dtype, 'broadcast')
# broadcast is linear; its transpose sums over the added leading dims.
ad.deflinear(broadcast_p, lambda t, sizes: [_reduce_sum(t, range(len(sizes)))])
batching.primitive_batchers[broadcast_p] = _broadcast_batch_rule
def _broadcast_in_dim_shape_rule(operand, shape, broadcast_dimensions):
  """The result shape of broadcast_in_dim is the requested `shape`, once
  broadcast_dimensions is validated to map every operand axis to an output
  axis."""
  _check_shapelike('broadcast_in_dim', 'shape', shape)
  _check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
                   broadcast_dimensions)
  if operand.ndim != len(broadcast_dimensions):
    msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
           'operand ndim, got broadcast_dimensions {} for operand ndim {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand.ndim))
  if not set(broadcast_dimensions).issubset(set(range(len(shape)))):
    msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '
           'dimensions, got {} for operand ndim {} and shape {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand.ndim, shape))
  return shape
def _broadcast_in_dim_transpose_rule(t, shape, broadcast_dimensions):
  # Sum the cotangent over the output dims that were newly added (those not
  # listed in broadcast_dimensions).
  axes = tuple(onp.delete(range(len(shape)), broadcast_dimensions))
  return [_reduce_sum(t, axes)]
def _broadcast_in_dim_batch_rule(batched_args, batch_dims, shape,
                                 broadcast_dimensions):
  operand, = batched_args
  bdim, = batch_dims
  # Put the batch dim first, prepend its size to the target shape, and shift
  # every broadcast dimension right by one.
  new_operand = batching.move_dim_to_front(operand, bdim)
  new_shape = (operand.shape[bdim],) + shape
  new_broadcast_dimensions = (0,) + tuple(onp.add(1, broadcast_dimensions))
  return broadcast_in_dim(new_operand, new_shape, new_broadcast_dimensions), 0
broadcast_in_dim_p = standard_primitive(
    _broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')
ad.deflinear(broadcast_in_dim_p, _broadcast_in_dim_transpose_rule)
batching.primitive_batchers[broadcast_in_dim_p] = _broadcast_in_dim_batch_rule
def _clamp_shape_rule(min, operand, max):
if min.shape and min.shape != operand.shape:
m = "clamp requires min.shape == operand.shape or min.shape == (), got {}."
raise TypeError(m.format(min.shape))
if max.shape and max.shape != operand.shape:
m = "clamp requires max.shape == operand.shape or max.shape == (), got {}."
raise TypeError(m.format(max.shape))
return operand.shape
_clamp_dtype_rule = partial(binop_dtype_rule, _input_dtype, [_any, _any, _any],
                            'clamp')
clamp_p = standard_primitive(_clamp_shape_rule, _clamp_dtype_rule, 'clamp')
# JVPs for clamp(min, operand, max), one lambda per argument: the tangent
# flows through whichever argument is active at each element -- min where
# the operand is clipped from below, operand in the interior, and max where
# the operand exceeds it.
ad.defjvp(clamp_p,
          lambda g, min, operand, max:
          select(bitwise_and(gt(min, operand), lt(min, max)),
                 _brcast(g, operand), _zeros(operand)),
          lambda g, min, operand, max:
          select(bitwise_and(gt(operand, min), lt(operand, max)),
                 g, _zeros(operand)),
          lambda g, min, operand, max:
          select(lt(max, operand), _brcast(g, operand), _zeros(operand)))
def _concatenate_shape_rule(*operands, **kwargs):
  """Shape rule for concatenate.

  All operands must be arrays of equal rank whose shapes agree on every
  dimension except `dimension`, along which the sizes are summed.
  """
  dimension = kwargs.pop('dimension')
  if not operands:
    msg = "concatenate expects at least one operand, got 0."
    raise TypeError(msg)
  if not all(isinstance(operand, UnshapedArray) for operand in operands):
    msg = "All objects to concatenate must be arrays, got {}."
    op = next(op for op in operands if not isinstance(op, UnshapedArray))
    raise TypeError(msg.format(type(op)))
  if len(set(operand.ndim for operand in operands)) != 1:
    msg = "Cannot concatenate arrays with different ranks, got {}."
    raise TypeError(msg.format(", ".join(str(o.ndim) for o in operands)))
  shapes = onp.array([operand.shape for operand in operands])
  if not 0 <= dimension < shapes.shape[1]:
    msg = "concatenate dimension out of bounds: dimension {} for shapes {}."
    raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
  # All shapes must match the first shape once `dimension` is excluded.
  if not onp.all(onp.delete(shapes[0] == shapes, dimension, axis=1)):
    msg = ("Cannot concatenate arrays with shapes that differ in dimensions "
           "other than the one being concatenated: dimension {} for shapes {}.")
    raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
  concat_size = sum(o.shape[dimension] for o in operands)
  ex_shape = operands[0].shape
  return ex_shape[:dimension] + (concat_size,) + ex_shape[dimension+1:]
def _concatenate_dtype_rule(*operands, **kwargs):
  # All operands must share one dtype; the result keeps it.
  _check_same_dtypes('concatenate', False, *(o.dtype for o in operands))
  return operands[0].dtype
def _concatenate_translation_rule(c, *operands, **kwargs):
  """Lower concatenate to XLA's Concatenate op."""
  dimension = kwargs.pop('dimension')
  return c.Concatenate(operands, dimension=dimension)
def _concatenate_transpose_rule(t, *operands, **kwargs):
  """Transpose of concatenate: slice the cotangent back into pieces.

  An operand that is None marks an undefined primal (the one being
  transposed against); a cotangent is produced only at those positions.
  """
  dimension = kwargs.pop('dimension')
  operand_shapes = kwargs.pop('operand_shapes')
  if t is ad_util.zero:
    return [ad_util.zero if o is None else None for o in operands]
  else:
    # Cumulative sizes along `dimension` give each operand's slice bounds.
    limit_points = onp.cumsum([shape[dimension] for shape in operand_shapes])
    starts = onp.zeros((len(operands), t.ndim), dtype=int)
    starts[1:, dimension] = limit_points[:-1]
    limits = onp.tile(t.shape, (len(operands), 1))
    limits[:, dimension] = limit_points
    # NOTE(review): `slice` here is presumably the module-level lax slice op
    # (shadowing the builtin) -- confirm against the full file.
    return [slice(t, start, limit) if o is None else None
            for o, start, limit in zip(operands, starts, limits)]
def _concatenate_batch_rule(batched_args, batch_dims, dimension, operand_shapes):
  # Bring every operand's batch dim to the front, broadcasting operands that
  # were not batched, then concatenate along the shifted dimension.
  size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims)
              if bdim is not None)
  operands = [batching.move_dim_to_front(op, bdim) if bdim is not None
              else broadcast(op, (size,))
              for op, bdim in zip(batched_args, batch_dims)]
  return concatenate(operands, dimension + 1), 0
concatenate_p = standard_primitive(
    _concatenate_shape_rule, _concatenate_dtype_rule, 'concatenate',
    _concatenate_translation_rule)
ad.deflinear(concatenate_p, _concatenate_transpose_rule)
ad.primitive_transposes[concatenate_p] = _concatenate_transpose_rule
batching.primitive_batchers[concatenate_p] = _concatenate_batch_rule
def _pad_shape_rule(operand, padding_value, padding_config):
if operand.dtype != padding_value.dtype:
msg = "pad operand and padding_value must be same dtype: got {} and {}."
raise TypeError(msg.format(operand.dtype, padding_value.dtype))
lo, hi, interior = zip(*padding_config)
out_shape = onp.add(onp.add(onp.add(lo, hi), operand.shape),
onp.multiply(interior, onp.subtract(operand.shape, 1)))
return tuple(out_shape)
def _pad_transpose(t, operand, padding_value, padding_config):
  """Transpose of pad.

  The operand cotangent strips the padding back off (negative lo/hi padding,
  then a strided slice over the interior padding); the padding value's
  cotangent is the total cotangent mass minus the part attributed to the
  operand. None marks a defined primal (no cotangent needed).
  """
  if t is ad_util.zero:
    return [ad_util.zero if operand is None else None,
            ad_util.zero if padding_value is None else None]
  lo, hi, interior = zip(*padding_config)
  total = lambda x: _reduce_sum(x, list(range(t.ndim)))
  def t_op():
    # Undo lo/hi padding with negative padding, then stride past the
    # interior padding with a strided slice.
    unpad_config = zip(onp.negative(lo), onp.negative(hi), onp.zeros_like(interior))
    unpadded = pad(t, onp.array(0., t.dtype), unpad_config)
    return slice(unpadded, onp.zeros_like(lo), unpadded.shape, onp.add(interior, 1))
  t_operand = t_op() if operand is None else None
  t_padv = sub(total(t), total(t_operand)) if padding_value is None else None
  return [t_operand, t_padv]
def _pad_batch_rule(batched_args, batch_dims, padding_config):
  """Batching for pad: add a (0, 0, 0) (no-op) config entry for the batch
  dimension. A batched padding value is not supported yet."""
  operand, padding_value = batched_args
  operand_bdim, padding_value_bdim = batch_dims
  if padding_value_bdim is None:
    assert operand_bdim is not None
    padding_config = list(padding_config)
    padding_config.insert(operand_bdim, (0, 0, 0))
    return pad(operand, padding_value, padding_config), operand_bdim
  else:
    raise NotImplementedError  # loop and stack
pad_p = standard_primitive(_pad_shape_rule, _input_dtype, 'pad')
ad.deflinear(pad_p, _pad_transpose)
ad.primitive_transposes[pad_p] = _pad_transpose
batching.primitive_batchers[pad_p] = _pad_batch_rule
# We have a nonstandard reshape impl so that we can be lazy about data movement
# for specific types, particularly ShardedDeviceArrays / ChunkedDeviceArrays
def _reshape_impl(operand, new_sizes, dimensions, old_sizes):
  """Impl for reshape that avoids data movement for sharded/chunked arrays.

  Merging the two leading axes of a ShardedDeviceArray, or splitting the
  leading axis of a ChunkedDeviceArray back out, can be done by rewrapping
  the existing device buffers; anything else falls back to the normal XLA
  path.
  """
  if (type(operand) is pxla.ShardedDeviceArray and dimensions is None
      and _is_axis_merge(old_sizes, new_sizes)):
    aval = ShapedArray(new_sizes, operand.dtype)
    return pxla.ChunkedDeviceArray(old_sizes[0], aval, operand.device_buffers)
  elif (type(operand) is pxla.ChunkedDeviceArray and dimensions is None
        and _is_axis_split(old_sizes, new_sizes)
        and operand.axis_size == new_sizes[0]):
    aval = ShapedArray(new_sizes, operand.dtype)
    return pxla.ShardedDeviceArray(aval, operand.device_buffers)
  else:
    return xla.apply_primitive(reshape_p, operand, new_sizes=new_sizes,
                               dimensions=dimensions, old_sizes=old_sizes)
def _is_axis_merge(s1, s2):
return s1[2:] == s2[1:] and s1[0] * s1[1] == s2[0]
def _is_axis_split(s1, s2):
  """True iff reshaping `s1` to `s2` splits s1's leading axis in two.

  Splitting s1 -> s2 is exactly the inverse of merging s2 -> s1.
  """
  merged, split = s1, s2
  return _is_axis_merge(split, merged)
def _reshape_shape_rule(operand, new_sizes, dimensions, **unused_kwargs):
  """Shape rule for reshape.

  new_sizes must be nonnegative and preserve the total element count;
  `dimensions`, when given, must be a permutation of the operand's axes
  (a transpose applied before reshaping).
  """
  if not onp.all(onp.greater_equal(new_sizes, 0)):
    # The check is >= 0 (zero-sized dims are allowed), so the message must
    # say "nonnegative"; it previously claimed "positive", contradicting
    # the condition actually enforced.
    msg = 'reshape new_sizes must all be nonnegative, got {}.'
    raise TypeError(msg.format(new_sizes))
  if prod(onp.shape(operand)) != prod(new_sizes):
    msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
    raise TypeError(msg.format(new_sizes, onp.shape(operand)))
  if dimensions is not None:
    if set(dimensions) != set(range(onp.ndim(operand))):
      msg = ('reshape dimensions must be a permutation of operand dimensions, '
             'got dimensions {} for shape {}.')
      raise TypeError(msg.format(dimensions, onp.shape(operand)))
  return tuple(new_sizes)
def _reshape_dtype_rule(operand, new_sizes, dimensions, **unused_kwargs):
return operand.dtype
def _reshape_translation_rule(c, operand, new_sizes, dimensions, old_sizes):
  """Lower reshape to XLA's Reshape op; old_sizes is only used by the impl."""
  del old_sizes  # Unused.
  return c.Reshape(operand, new_sizes=new_sizes, dimensions=dimensions)
def _reshape_transpose_rule(t, new_sizes, dimensions, old_sizes):
  """Transpose of reshape: reshape the cotangent back to the old shape,
  undoing the pre-reshape transpose when `dimensions` was given."""
  if dimensions is None:
    return [reshape(t, old_sizes)]
  else:
    # Reshape to the permuted old shape, then invert the permutation.
    return [transpose(reshape(t, onp.take(old_sizes, dimensions)),
                      onp.argsort(dimensions))]
def _reshape_batch_rule(batched_args, batch_dims, new_sizes, dimensions, **unused):
  """Batching rule for reshape: move the batch dim to the front and prepend
  its size to the requested output shape.

  Reshapes carrying a `dimensions` permutation are not supported under
  batching yet.
  """
  operand, = batched_args
  bdim, = batch_dims
  operand = batching.move_dim_to_front(operand, bdim)
  if dimensions is not None:
    raise NotImplementedError  # TODO(mattjj): handle reshape w/ dimensions
  # `dimensions` is necessarily None here. The previous code rebuilt a
  # shifted `dimensions` tuple after the raise, which on the reachable
  # (None) path would have evaluated onp.add(1, None) and crashed; pass
  # None explicitly instead.
  return reshape(operand, operand.shape[:1] + new_sizes, None), 0
reshape_p = standard_primitive(_reshape_shape_rule, _reshape_dtype_rule,
                               'reshape', _reshape_translation_rule)
# Custom impl so reshapes of sharded/chunked device arrays can stay lazy.
reshape_p.def_impl(_reshape_impl)
ad.deflinear(reshape_p, _reshape_transpose_rule)
batching.primitive_batchers[reshape_p] = _reshape_batch_rule
def _rev_shape_rule(operand, dimensions):
  """rev is shape-preserving; validate the dimensions to be reversed.

  `dimensions` must be unique and each strictly less than the operand rank.
  """
  _check_shapelike('rev', 'dimensions', dimensions)
  if len(dimensions) != len(set(dimensions)):
    raise TypeError('rev dimensions must be unique, got {}.'.format(dimensions))
  if not _max(dimensions) < operand.ndim:
    raise TypeError(
        ('rev dimensions must all be less than operand ndim, got dimensions '
         '{} for operand ndim {}.').format(dimensions, operand.ndim))
  return operand.shape
def _rev_batch_rule(batched_args, batch_dims, dimensions):
  """Batching for rev: shift each reversed dim at/after the batch dim by one."""
  operand, = batched_args
  bdim, = batch_dims
  shifted = []
  for dim in dimensions:
    shifted.append(dim + 1 if dim >= bdim else dim)
  return rev(operand, shifted), bdim
rev_p = standard_primitive(_rev_shape_rule, _input_dtype, 'rev')
# rev is linear and self-inverse: transpose by reversing the cotangent.
ad.deflinear(rev_p, lambda t, dimensions: [rev(t, dimensions)])
batching.primitive_batchers[rev_p] = _rev_batch_rule
def _transpose_shape_rule(operand, permutation):
if not isinstance(permutation, (tuple, list, onp.ndarray)):
msg = "transpose permutation must be a tuple/list/ndarray, got {}."
raise TypeError(msg.format(type(permutation)))
if tuple(sorted(permutation)) != tuple(range(operand.ndim)):
msg = ("transpose permutation isn't a permutation of operand dimensions, "
"got permutation {} for operand shape {}.")
raise TypeError(msg.format(permutation, operand.shape))
return tuple(onp.take(operand.shape, permutation))
def _transpose_batch_rule(batched_args, batch_dims, permutation):
  """Batching for transpose: pin the batch dim at position 0 and shift the
  remaining permutation entries past it."""
  operand, = batched_args
  bdim, = batch_dims
  shifted = tuple(p + 1 if p >= bdim else p for p in permutation)
  return transpose(operand, (bdim,) + shifted), 0
transpose_p = standard_primitive(_transpose_shape_rule, _input_dtype,
                                 'transpose')
# transpose is linear; its transpose applies the inverse permutation.
ad.deflinear(transpose_p,
             lambda t, permutation: [transpose(t, onp.argsort(permutation))])
batching.primitive_batchers[transpose_p] = _transpose_batch_rule
def _select_shape_rule(pred, on_true, on_false):
if on_true.shape != on_false.shape:
msg = "select on_true and on_false must have the same shape, got {} and {}."
raise TypeError(msg.format(on_true.shape, on_false.shape))
if pred.shape and pred.shape != on_true.shape:
msg = ("select pred must be scalar or have the same shape as on_true and "
"on_false, got pred shape {} for on_true and on_false of shape {}.")
raise TypeError(msg.format(pred.shape, on_true.shape))
return on_true.shape
def _select_dtype_rule(pred, on_true, on_false):
  """on_true/on_false must share a dtype (which is the result dtype); pred
  must be boolean."""
  _check_same_dtypes("select", False, on_true.dtype, on_false.dtype)
  if onp.issubdtype(pred.dtype, onp.bool_):
    return on_true.dtype
  raise TypeError(
      "select pred must be boolean type, got {}.".format(pred.dtype))
def _select_transpose_rule(t, pred, on_true, on_false):
  """Transpose of select.

  pred never receives a cotangent (first result is always None); the branch
  arguments that are None (the undefined primals) receive the cotangent
  masked by pred, with zeros where the other branch was selected.
  """
  assert pred is not None
  if t is ad_util.zero:
    return [None,
            ad_util.zero if on_true is None else None,
            ad_util.zero if on_false is None else None]
  else:
    zeros = full_like(t, 0)
    return [None,
            select(pred, t, zeros) if on_true is None else None,
            select(pred, zeros, t) if on_false is None else None]
def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):
  """Batching for select, with fast paths that avoid moving batch dims."""
  pred, on_true, on_false, = batched_args
  pred_bdim, ot_bdim, of_bdim = batch_dims
  size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)
              if i is not None)
  # avoid transposes and some broadcasts in special cases
  if pred_bdim == ot_bdim == of_bdim:
    if onp.shape(pred) == onp.shape(on_true):
      return select(pred, on_true, on_false), pred_bdim
    else:
      # vmapped function had a scalar pred with nonscalar args
      assert onp.ndim(pred) == 1
      pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])
      return select(pred, on_true, on_false), pred_bdim
  elif onp.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None:
    if ot_bdim == of_bdim:
      return select(pred, on_true, on_false), ot_bdim
    elif onp.shape(on_true) == onp.shape(on_false):
      on_false = batching.moveaxis(size, ot_bdim, of_bdim, on_false)
      return select(pred, on_true, on_false), ot_bdim
  # General case: bring every argument's batch dim to the front,
  # broadcasting arguments that were not batched.
  pred = batching.bdim_at_front(pred, pred_bdim, size, force_broadcast=True)
  on_true = batching.bdim_at_front(on_true, ot_bdim, size, force_broadcast=True)
  on_false = batching.bdim_at_front(on_false, of_bdim, size, force_broadcast=True)
  assert onp.shape(on_true) == onp.shape(on_false)
  if 0 < onp.ndim(pred) < onp.ndim(on_true):
    # vmapped function had a scalar pred with nonscalar args
    assert onp.ndim(pred) == 1
    pred = broadcast_in_dim(pred, on_true.shape, [0])
  return select(pred, on_true, on_false), 0
select_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select')
# JVP: no tangent flows through pred (None); for on_true/on_false the
# tangent passes through where that branch is selected and is zero elsewhere.
ad.defjvp(select_p,
          None,
          lambda g, b, x, y: select(b, g, _zeros(g)),
          lambda g, b, x, y: select(b, _zeros(g), g))
ad.primitive_transposes[select_p] = _select_transpose_rule
batching.primitive_batchers[select_p] = _select_batch_rule
def _slice_shape_rule(operand, start_indices, limit_indices, strides,
                      operand_shape):
  """Shape rule for slice_p.

  Validates start/limit/stride arguments against the operand, then returns
  the strided result shape, ceil((limit - start) / stride) per dimension.

  Raises:
    TypeError: if the indices or strides are malformed for the operand.
  """
  _check_shapelike("slice", "start_indices", start_indices)
  _check_shapelike("slice", "limit_indices", limit_indices)
  if operand.ndim != len(start_indices):
    msg = ("slice start_indices must have length equal to the number of "
           "dimensions of the operand, got indices {} for operand shape {}.")
    raise TypeError(msg.format(start_indices, operand.shape))
  if len(start_indices) != len(limit_indices):
    # NOTE: fixed a typo in this message ("start_inidices").
    msg = ("slice limit_indices must have the same length as start_indices, "
           "got start_indices {} and limit_indices {}.")
    raise TypeError(msg.format(start_indices, limit_indices))
  if not onp.all(onp.less_equal(limit_indices, operand.shape)):
    msg = ("slice limit_indices must be less than or equal to operand shape, "
           "got limit_indices {} for operand shape {}.")
    raise TypeError(msg.format(limit_indices, operand.shape))
  if not onp.all(onp.greater_equal(start_indices, 0)):
    msg = ("slice start_indices must be greater than or equal to zero, "
           "got start_indices of {}.")
    raise TypeError(msg.format(start_indices))
  if not onp.all(onp.greater_equal(limit_indices, start_indices)):
    msg = ("slice limit_indices must be greater than or equal to start_indices,"
           " got start_indices {} and limit_indices {}.")
    raise TypeError(msg.format(start_indices, limit_indices))
  if strides is None:
    strides = onp.ones(operand.ndim, onp.int32)
  else:
    _check_shapelike("slice", "strides", strides)
    if len(strides) != operand.ndim:
      msg = ("slice strides must have length equal to the number of dimensions "
             "of the operand, got strides {} for operand shape {}.")
      raise TypeError(msg.format(strides, operand.shape))
    if not onp.all(onp.greater(strides, 0)):
      msg = "slice strides must be positive, got {}"
      raise TypeError(msg.format(strides))
  # ceil((limit - start) / stride), computed in integer arithmetic.
  result_shape = onp.floor_divide(
      onp.add(onp.subtract(limit_indices, start_indices), strides) - 1, strides)
  return tuple(result_shape)
def _slice_translation_rule(c, operand, start_indices, limit_indices, strides,
                            operand_shape):
  """Lower slice_p to the XLA Slice op."""
  del operand_shape  # Not needed for lowering.
  return c.Slice(operand, start_indices, limit_indices, strides)
def _slice_transpose_rule(t, start_indices, limit_indices, strides,
                          operand_shape):
  """Transpose of slice: pad the cotangent back up to the operand shape."""
  if strides is None or onp.all(onp.equal(strides, 1)):
    # Unstrided: low padding = start, high padding = what was cut off the end,
    # no interior padding.
    pads = zip(start_indices, onp.subtract(operand_shape, limit_indices),
               (0,) * len(start_indices))
  else:
    # Strided: interior padding of (stride - 1) re-inserts the skipped
    # elements; real_limits is one past where the last kept element lives.
    real_limits = onp.add(onp.add(start_indices, 1),
                          onp.multiply(onp.subtract(t.shape, 1), strides))
    pads = zip(start_indices, onp.subtract(operand_shape, real_limits),
               onp.subtract(strides, 1))
  result = pad(t, _const(t, 0), pads)
  assert result.shape == operand_shape
  return [result]
def _slice_batching_rule(batched_args, batch_dims, start_indices, limit_indices,
                         strides, **unused_kwargs):
  """Batch slice_p by taking the batch dimension whole (stride 1)."""
  operand, = batched_args
  bdim, = batch_dims
  starts = list(start_indices)
  starts.insert(bdim, 0)
  limits = list(limit_indices)
  limits.insert(bdim, operand.shape[bdim])
  if strides is None:
    batched_strides = None
  else:
    batched_strides = list(strides)
    batched_strides.insert(bdim, 1)
  return slice(operand, starts, limits, batched_strides), bdim
# slice: primitive registration.  Slicing is linear, so deflinear derives the
# JVP from the transpose rule.
slice_p = standard_primitive(_slice_shape_rule, _input_dtype, 'slice',
                             _slice_translation_rule)
ad.deflinear(slice_p, _slice_transpose_rule)
batching.primitive_batchers[slice_p] = _slice_batching_rule
def _dynamic_slice_shape_rule(operand, start_indices, slice_sizes,
operand_shape):
if operand.ndim != len(start_indices):
msg = ("dynamic_slice start_indices must have length equal to the number "
"of dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(slice_sizes):
msg = ("dynamic_slice slice_sizes must have the same length as "
"start_indices, got start_inidices length {} and slice_sizes {}.")
raise TypeError(msg.format(len(start_indices), slice_sizes))
if not onp.all(onp.less_equal(slice_sizes, operand.shape)):
msg = ("slice slice_sizes must be less than or equal to operand shape, "
"got slice_sizes {} for operand shape {}.")
raise TypeError(msg.format(slice_sizes, operand.shape))
if not onp.all(onp.greater_equal(slice_sizes, 0)):
msg = ("slice slice_sizes must be greater than or equal to zero, "
"got slice_sizes of {}.")
raise TypeError(msg.format(slice_sizes))
return tuple(slice_sizes)
def _dynamic_slice_translation_rule(c, operand, start_indices, slice_sizes,
                                    operand_shape):
  """Lower dynamic_slice_p to the XLA DynamicSlice op."""
  del operand_shape  # Not needed for lowering.
  return c.DynamicSlice(operand, start_indices, slice_sizes)
def _dynamic_slice_jvp_rule(g, operand, start_indices, slice_sizes,
                            operand_shape):
  """JVP: dynamic_slice is linear in the operand, so slice the tangent."""
  del operand, operand_shape  # Linear in operand; its value is not needed.
  return dynamic_slice(g, start_indices, slice_sizes)
def _dynamic_slice_transpose_rule(t, operand, start_indices, slice_sizes,
                                  operand_shape):
  """Transpose: scatter the cotangent back into a zero-filled operand."""
  assert operand is None
  operand_zeros = full(operand_shape, tie_in(t, _zero(t)))
  scattered = dynamic_update_slice(operand_zeros, t, start_indices)
  return [scattered, ad_util.zero]
def _dynamic_slice_batching_rule(batched_args, batch_dims, slice_sizes,
                                 operand_shape):
  """Batch dynamic_slice by lowering it to an equivalent gather."""
  # dynamic_slice is the gather special case where every operand dimension is
  # an offset dimension, so we can delegate to the gather batching rule.
  # TODO(phawkins): consider removing dynamic_slice entirely and using gather
  # always.
  all_dims = tuple(range(len(operand_shape)))
  gather_dnums = GatherDimensionNumbers(offset_dims=all_dims,
                                        collapsed_slice_dims=(),
                                        start_index_map=all_dims)
  return _gather_batching_rule(batched_args, batch_dims, gather_dnums,
                               slice_sizes, operand_shape)
# dynamic_slice: primitive registration plus autodiff and batching rules.
dynamic_slice_p = standard_primitive(
    _dynamic_slice_shape_rule, _input_dtype, 'dynamic_slice',
    _dynamic_slice_translation_rule)
# No JVP with respect to start_indices: they are integer-valued.
ad.defjvp(dynamic_slice_p, _dynamic_slice_jvp_rule, None)
ad.primitive_transposes[dynamic_slice_p] = _dynamic_slice_transpose_rule
batching.primitive_batchers[dynamic_slice_p] = _dynamic_slice_batching_rule
def _dynamic_update_slice_shape_rule(operand, update, start_indices,
update_shape):
if operand.ndim != update.ndim:
msg = ("dynamic_update_slice update must have the same rank as operand, "
"got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
if operand.ndim != len(start_indices):
msg = ("dynamic_update_slice start_indices must have length equal to the "
"rank of operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if not onp.all(onp.less_equal(update.shape, operand.shape)):
msg = ("dynamic_update_slice update shape must be smaller than operand "
"shape, got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
return operand.shape
def _dynamic_update_slice_dtype_rule(operand, update, start_indices,
                                     update_shape):
  """Result dtype is the operand's; operand and update dtypes must agree."""
  del start_indices, update_shape  # dtype depends only on operand/update.
  _check_same_dtypes("dynamic_update_slice", False, operand.dtype, update.dtype)
  return operand.dtype
def _dynamic_update_slice_jvp(primals, tangents, update_shape):
  """JVP: dynamic_update_slice is linear in (operand, update) jointly."""
  del update_shape  # Unused.
  operand, update, start_indices = primals
  g_operand, g_update, g_start_indices = tangents
  val_out = dynamic_update_slice(operand, update, start_indices)
  if g_operand is ad_util.zero and g_update is ad_util.zero:
    return val_out, ad_util.zero
  g_operand = ad.instantiate_zeros(operand, g_operand)
  g_update = ad.instantiate_zeros(update, g_update)
  return val_out, dynamic_update_slice(g_operand, g_update, start_indices)
def _dynamic_update_slice_transpose_rule(t, operand, update, start_indices,
                                         update_shape):
  """Transpose: the operand cotangent is t with the updated window zeroed;
  the update cotangent is that window sliced out of t."""
  assert start_indices is not None
  operand_t = (dynamic_update_slice(t, _zeros(t, shape=update_shape),
                                    start_indices)
               if operand is None else None)
  update_t = (dynamic_slice(t, start_indices, update_shape)
              if update is None else None)
  return [operand_t, update_t, None]
def _dynamic_update_slice_translation_rule(c, operand, update, start_indices,
                                           update_shape):
  """Lower to the XLA DynamicUpdateSlice op."""
  del update_shape  # Not needed for lowering.
  return c.DynamicUpdateSlice(operand, update, start_indices)
def _dynamic_update_slice_batching_rule(batched_args, batch_dims, update_shape):
  """Batch dynamic_update_slice by lowering it to an equivalent scatter."""
  # A dynamic update slice is a special case of scatter; delegate to the
  # scatter batching rule.
  # TODO(phawkins): consider removing dynamic_update_slice entirely and using
  # scatter always.
  operand, update, index = batched_args
  operand_bdims, update_bdims, index_bdims = batch_dims
  window_dims = tuple(range(len(update_shape)))
  scatter_dnums = ScatterDimensionNumbers(
      update_window_dims=window_dims,
      inserted_window_dims=(),
      scatter_dims_to_operand_dims=window_dims)
  return _scatter_batching_rule(
      scatter,
      (operand, index, update), (operand_bdims, index_bdims, update_bdims),
      None, None, scatter_dnums, update_shape)
# dynamic_update_slice: primitive registration plus autodiff/batching rules.
dynamic_update_slice_p = standard_primitive(
    _dynamic_update_slice_shape_rule, _dynamic_update_slice_dtype_rule,
    'dynamic_update_slice', _dynamic_update_slice_translation_rule)
ad.primitive_jvps[dynamic_update_slice_p] = _dynamic_update_slice_jvp
ad.primitive_transposes[dynamic_update_slice_p] = \
    _dynamic_update_slice_transpose_rule
batching.primitive_batchers[dynamic_update_slice_p] = \
    _dynamic_update_slice_batching_rule
class GatherDimensionNumbers(collections.namedtuple(
    "GatherDimensionNumbers",
    ["offset_dims", "collapsed_slice_dims", "start_index_map"])):
  """
  Describes the dimension number arguments to XLA's `Gather operator
  <https://www.tensorflow.org/xla/operation_semantics#gather>`_. See the XLA
  documentation for more details of what the dimension numbers mean.

  Args:
    offset_dims: the set of dimensions in the `gather` output that offset into
      an array sliced from `operand`. Must be a tuple of integers in ascending
      order, each representing a dimension number of the output.
    collapsed_slice_dims: the set of dimensions `i` in `operand` that have
      `slice_sizes[i] == 1` and that should not have a corresponding dimension
      in the output of the gather. Must be a tuple of integers in ascending
      order.
    start_index_map: for each dimension in `start_indices`, gives the
      corresponding dimension in `operand` that is to be sliced. Must be a
      tuple of integers with size equal to `start_indices.shape[-1]`.

  Unlike XLA's `GatherDimensionNumbers` structure, `index_vector_dim` is
  implicit; there is always an index vector dimension and it must always be the
  last dimension. To gather scalar indices, add a trailing dimension of size 1.
  """
def _gather_dimensions_proto(indices_shape, dimension_numbers):
  """Build the XLA GatherDimensionNumbers proto from our namedtuple."""
  assert type(dimension_numbers) is GatherDimensionNumbers
  proto = xla_client.GatherDimensionNumbers()
  proto.offset_dims.extend(dimension_numbers.offset_dims)
  proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
  proto.start_index_map.extend(dimension_numbers.start_index_map)
  # The index vector dimension is implicit in our API: always the last one.
  rank = indices_shape.rank()
  assert rank > 0
  proto.index_vector_dim = rank - 1
  return proto
def _gather_dtype_rule(operand, start_indices, **kwargs):
  """Gather yields the operand's canonicalized dtype; indices must be ints."""
  del kwargs  # Other rule parameters do not affect the dtype.
  if not onp.issubdtype(start_indices.dtype, onp.integer):
    raise ValueError("start_indices must have an integer type")
  return xla_bridge.canonicalize_dtype(operand.dtype)
def _gather_shape_rule(operand, start_indices, dimension_numbers, slice_sizes,
operand_shape):
assert operand.shape == operand_shape
if len(operand_shape) != len(slice_sizes):
msg = ("slice_sizes must have rank equal to the gather operand; "
"operand.shape={}, slice_sizes={}".format(operand_shape, slice_sizes))
raise ValueError(msg)
result_rank = len(dimension_numbers.offset_dims) + start_indices.ndim - 1
start_indices_shape = iter(start_indices.shape[:-1])
slice_sizes = iter(onp.delete(slice_sizes, dimension_numbers.collapsed_slice_dims))
return tuple(next(slice_sizes) if i in dimension_numbers.offset_dims
else next(start_indices_shape) for i in range(result_rank))
def _gather_translation_rule(c, operand, start_indices, dimension_numbers,
                             slice_sizes, operand_shape):
  """Lower gather_p to the XLA Gather op."""
  del operand_shape  # Not needed for lowering.
  dnums_proto = _gather_dimensions_proto(c.GetShape(start_indices),
                                         dimension_numbers)
  return c.Gather(operand, start_indices, dnums_proto, slice_sizes)
def _gather_jvp_rule(g, operand, start_indices, dimension_numbers, slice_sizes,
                     operand_shape):
  """JVP: gather is linear in the operand, so gather the tangent."""
  del operand, operand_shape  # Linear in operand; its value is not needed.
  return gather(g, start_indices, dimension_numbers, slice_sizes)
def _gather_transpose_rule(t, operand, start_indices, dimension_numbers,
                           slice_sizes, operand_shape):
  """Transpose of gather is a scatter-add of the cotangent into zeros."""
  assert operand is None
  if t is ad_util.zero:
    return [ad_util.zero, ad_util.zero]
  operand_zeros = full(operand_shape, tie_in(t, _zero(t)))
  # Mirror the gather dimension numbers into scatter dimension numbers.
  scatter_dnums = ScatterDimensionNumbers(
      update_window_dims=dimension_numbers.offset_dims,
      inserted_window_dims=dimension_numbers.collapsed_slice_dims,
      scatter_dims_to_operand_dims=dimension_numbers.start_index_map)
  operand_t = scatter_add(operand_zeros, start_indices, t, scatter_dnums)
  return [operand_t, ad_util.zero]
def _gather_batching_rule(batched_args, batch_dims, dimension_numbers,
                          slice_sizes, operand_shape):
  """Batching rule for gather_p.

  Three cases: only the operand is batched, only the indices are batched, or
  both are.  In the last case an iota is concatenated onto each index vector
  so that every batch element gathers from its own operand slice.
  """
  operand, start_indices = batched_args
  operand_bdim, start_indices_bdim = batch_dims
  if operand_bdim is not None and start_indices_bdim is None:
    # Only the operand is batched: treat the batch dim as an extra operand
    # dimension that is sliced whole, shifting all dimension numbers by one.
    operand = batching.move_dim_to_front(operand, operand_bdim)
    slice_sizes = (operand.shape[0],) + slice_sizes
    offset_dims = (0,) + tuple(onp.add(1, dimension_numbers.offset_dims))
    collapsed_slice_dims = tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))
    start_index_map = tuple(onp.add(1, dimension_numbers.start_index_map))
    dnums = GatherDimensionNumbers(
        offset_dims=offset_dims,
        collapsed_slice_dims=collapsed_slice_dims,
        start_index_map=start_index_map)
    return gather(operand, start_indices, dimension_numbers=dnums,
                  slice_sizes=slice_sizes), 0
  elif operand_bdim is None and start_indices_bdim is not None:
    # Only the indices are batched: the indices' batch dim becomes an output
    # batch dim, so only the offset dims need shifting.
    start_indices = batching.move_dim_to_front(start_indices, start_indices_bdim)
    offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))
    dnums = GatherDimensionNumbers(
        offset_dims=offset_dims,
        collapsed_slice_dims=dimension_numbers.collapsed_slice_dims,
        start_index_map=dimension_numbers.start_index_map)
    return gather(operand, start_indices, dimension_numbers=dnums,
                  slice_sizes=slice_sizes), 0
  else:
    # move our batch dimensions to the front to preserve sanity
    operand = batching.move_dim_to_front(operand, operand_bdim)
    start_indices = batching.move_dim_to_front(start_indices, start_indices_bdim)
    # Example: user code had start_indices shape (3, 4, 5), and we have to deal
    # with start_indices shape (7, 3, 4, 5). We transform that to a
    # start_indices of shape (7, 3, 4, 6) where we concatenated an iota that
    # counts along our batch dimension to the front of the ndindex.
    count_shape = list(start_indices.shape)
    count_shape[-1] = 1
    counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0)
    start_indices = concatenate([counts, start_indices], len(count_shape) - 1)
    # The iota selects a size-1 slice of the operand batch dim (collapsed
    # away), so each index vector reads from its own batch element.
    slice_sizes = (1,) + slice_sizes
    collapsed_slice_dims = (0,) + tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))
    offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))
    start_index_map = (0,) + tuple(onp.add(1, dimension_numbers.start_index_map))
    dnums = GatherDimensionNumbers(
        offset_dims=offset_dims,
        collapsed_slice_dims=collapsed_slice_dims,
        start_index_map=start_index_map)
    return gather(operand, start_indices, dimension_numbers=dnums,
                  slice_sizes=slice_sizes), 0
# gather: primitive registration.  No JVP w.r.t. start_indices (integers).
gather_p = standard_primitive(
    _gather_shape_rule, _gather_dtype_rule, 'gather',
    _gather_translation_rule)
ad.defjvp(gather_p, _gather_jvp_rule, None)
ad.primitive_transposes[gather_p] = _gather_transpose_rule
batching.primitive_batchers[gather_p] = _gather_batching_rule
class ScatterDimensionNumbers(collections.namedtuple(
    "ScatterDimensionNumbers",
    ["update_window_dims", "inserted_window_dims",
     "scatter_dims_to_operand_dims"])):
  """
  Describes the dimension number arguments to XLA's `Scatter operator
  <https://www.tensorflow.org/xla/operation_semantics#scatter>`_. See the XLA
  documentation for more details of what the dimension numbers mean.

  Args:
    update_window_dims: the set of dimensions in the `updates` that are window
      dimensions. Must be a tuple of integers in ascending
      order, each representing a dimension number.
    inserted_window_dims: the set of size 1 window dimensions that must be inserted
      into the shape of `updates`. Must be a tuple of integers in ascending
      order, each representing a dimension number of the output. These are the
      mirror image of `collapsed_slice_dims` in the case of `gather`.
    scatter_dims_to_operand_dims: for each dimension in `scatter_indices`, gives
      the corresponding dimension in `operand`. Must be a sequence of integers
      with size equal to `scatter_indices.shape[-1]`.

  Unlike XLA's `ScatterDimensionNumbers` structure, `index_vector_dim` is
  implicit; there is always an index vector dimension and it must always be the
  last dimension. To scatter scalar indices, add a trailing dimension of size 1.
  """
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
  """Build the XLA ScatterDimensionNumbers proto from our namedtuple."""
  assert type(dimension_numbers) is ScatterDimensionNumbers
  proto = xla_client.ScatterDimensionNumbers()
  proto.update_window_dims.extend(dimension_numbers.update_window_dims)
  proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
  proto.scatter_dims_to_operand_dims.extend(
      dimension_numbers.scatter_dims_to_operand_dims)
  # The index vector dimension is implicit in our API: always the last one.
  rank = indices_shape.rank()
  assert rank > 0
  proto.index_vector_dim = rank - 1
  return proto
def _scatter_dtype_rule(operand, scatter_indices, updates, **kwargs):
  """Scatter keeps the operand's canonicalized dtype; indices must be ints."""
  del kwargs  # Other rule parameters do not affect the dtype.
  if not onp.issubdtype(scatter_indices.dtype, onp.integer):
    raise ValueError("scatter_indices must have an integer type")
  _check_same_dtypes("scatter", False, operand.dtype, updates.dtype)
  return xla_bridge.canonicalize_dtype(operand.dtype)
def _scatter_shape_rule(operand, scatter_indices, updates, **kwargs):
return operand.shape
def _scatter_translation_rule(c, operand, scatter_indices, updates,
                              update_jaxpr, update_consts, dimension_numbers,
                              updates_shape):
  """Lower a scatter primitive to the XLA Scatter op."""
  del updates_shape  # Not needed for lowering.
  dtype = c.GetShape(operand).numpy_dtype()
  init_value = c.Constant(onp.array(0, dtype))
  update_computation = _reduction_computation(
      c, update_jaxpr, update_consts, init_value)
  dnums_proto = _scatter_dimensions_proto(c.GetShape(scatter_indices),
                                          dimension_numbers)
  return c.Scatter(operand, scatter_indices, updates, update_computation,
                   dnums_proto)
def _scatter_add_jvp(primals, tangents, update_jaxpr, update_consts,
                     dimension_numbers, updates_shape):
  """JVP of scatter-add: the op is linear in (operand, updates) jointly."""
  operand, scatter_indices, updates = primals
  g_operand, g_scatter_indices, g_updates = tangents
  bind = partial(scatter_add_p.bind, update_jaxpr=update_jaxpr,
                 update_consts=update_consts,
                 dimension_numbers=dimension_numbers,
                 updates_shape=updates_shape)
  val_out = bind(operand, scatter_indices, updates)
  if g_operand is ad_util.zero and g_updates is ad_util.zero:
    return val_out, ad_util.zero
  g_operand = ad.instantiate_zeros(operand, g_operand)
  g_updates = ad.instantiate_zeros(updates, g_updates)
  tangent_out = bind(g_operand, scatter_indices, g_updates)
  return val_out, tangent_out
def _scatter_add_transpose_rule(t, operand, scatter_indices, updates,
                                update_jaxpr, update_consts, dimension_numbers,
                                updates_shape):
  """Transpose of scatter-add.

  The operand cotangent is the output cotangent itself, and the updates
  cotangent is the inverse gather of the cotangent at scatter_indices.
  """
  assert scatter_indices is not None
  if t is ad_util.zero:
    return [ad_util.zero, None, ad_util.zero]
  operand_t = update_t = None
  if operand is None:
    operand_t = t
  if updates is None:
    # Mirror the scatter dimension numbers into gather dimension numbers.
    gather_dnums = GatherDimensionNumbers(
        offset_dims=dimension_numbers.update_window_dims,
        collapsed_slice_dims=dimension_numbers.inserted_window_dims,
        start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
    # Window dims take their sizes from updates_shape; inserted dims are 1.
    slice_sizes = []
    pos = 0
    for i in xrange(len(t.shape)):
      if i in dimension_numbers.inserted_window_dims:
        slice_sizes.append(1)
      else:
        slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
        pos += 1
    update_t = gather(t, scatter_indices, dimension_numbers=gather_dnums,
                      slice_sizes=slice_sizes)
  return [operand_t, None, update_t]
def _scatter_batching_rule(
    scatter_op, batched_args, batch_dims, update_jaxpr, update_consts,
    dimension_numbers, updates_shape):
  """Batching rule shared by scatter/scatter-add/min/max (bound via partial).

  The operand is always given a leading batch dim (broadcast in if absent);
  the indices/updates cases then parallel _gather_batching_rule.
  """
  operand, scatter_indices, updates = batched_args
  operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims
  del update_jaxpr, update_consts, updates_shape  # Unused.
  # move the operand batch dim to the front if it is not None, otherwise create
  # it at the front (so that we can scatter into it)
  size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)
              if ax is not None)
  operand = batching.bdim_at_front(operand, operand_bdim, broadcast_size=size,
                                   force_broadcast=True)
  operand_bdim = 0
  if scatter_indices_bdim is not None and updates_bdim is None:
    # Replicate the updates so both indices and updates carry the batch dim.
    updates = broadcast(updates, (size,))
    updates_bdim = 0
  if scatter_indices_bdim is None and updates_bdim is not None:
    # Unbatched indices: the updates' batch dim becomes a window dim over the
    # operand's (front) batch dim; shift all dimension numbers by one.
    updates = batching.move_dim_to_front(updates, updates_bdim)
    inserted_window_dims = tuple(onp.add(1, dimension_numbers.inserted_window_dims))
    update_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.update_window_dims))
    scatter_dims_to_operand_dims = tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))
    dnums = ScatterDimensionNumbers(
        update_window_dims=update_window_dims,
        inserted_window_dims=inserted_window_dims,
        scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
    return scatter_op(operand, scatter_indices, updates, dnums), 0
  else:
    # see the third case in _gather_batching_rule for comparison and comments
    scatter_indices = batching.move_dim_to_front(scatter_indices,
                                                 scatter_indices_bdim)
    updates = batching.move_dim_to_front(updates, updates_bdim)
    count_shape = list(scatter_indices.shape)
    count_shape[-1] = 1
    counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0)
    scatter_indices = concatenate([counts, scatter_indices],
                                  len(count_shape) - 1)
    update_window_dims = tuple(onp.add(1, dimension_numbers.update_window_dims))
    inserted_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.inserted_window_dims))
    scatter_dims_to_operand_dims = (0,) + tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))
    dnums = ScatterDimensionNumbers(
        update_window_dims=update_window_dims,
        inserted_window_dims=inserted_window_dims,
        scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
    return scatter_op(operand, scatter_indices, updates, dnums), 0
# scatter-add: registration with full autodiff support (linear in its inputs).
scatter_add_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',
    _scatter_translation_rule)
ad.primitive_jvps[scatter_add_p] = _scatter_add_jvp
ad.primitive_transposes[scatter_add_p] = _scatter_add_transpose_rule
batching.primitive_batchers[scatter_add_p] = (
    partial(_scatter_batching_rule, scatter_add))
# scatter-min/scatter-max: batching only, no autodiff rules yet.
# TODO(jlebar): Add derivatives.
scatter_min_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',
    _scatter_translation_rule)
batching.primitive_batchers[scatter_min_p] = (
    partial(_scatter_batching_rule, scatter_min))
# TODO(jlebar): Add derivatives.
scatter_max_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',
    _scatter_translation_rule)
batching.primitive_batchers[scatter_max_p] = (
    partial(_scatter_batching_rule, scatter_max))
def _scatter_jvp(primals, tangents, update_jaxpr, update_consts,
                 dimension_numbers, updates_shape):
  """JVP for general scatter, which may have overlapping update indices.

  See the lettered comments below: updates are tagged with unique IDs so the
  tangent can be masked to match whichever update "won" in the primal, and
  the tangent is then computed with scatter-add (avoiding a scatter
  transpose rule).
  """
  operand, scatter_indices, updates = primals
  g_operand, g_scatter_indices, g_updates = tangents
  dnums = dimension_numbers
  if g_operand is ad_util.zero and g_updates is ad_util.zero:
    # No differentiable inputs have tangents: just compute the primal.
    val_out = scatter_p.bind(
        operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
        update_consts=update_consts, dimension_numbers=dnums,
        updates_shape=updates_shape)
    tangent_out = ad_util.zero
    return val_out, tangent_out
  # If there are overlapping indices in the scatter, it is unspecified which
  # update "wins". So we use the following perhaps surprising scheme:
  # a) attach a positive ID to each update in updates, forming (value, id) pairs
  #    (using a new array dimension because scatter doesn't actually support
  #    pairs).
  # b) perform the scatter, yielding (value, id) updates, which we split apart.
  # c) perform the inverse gather on the ids (similar to
  #    _scatter_add_transpose), and use it to build a mask for the tangent of
  #    `updates`.
  # d) perform a scatter-add on the masked JVP values. A benefit of using
  #    scatter-add here is that we don't need a `scatter` transpose rule.
  # a) add unique positive IDs (iotas) to the updates, and zeros to the operand.
  operand_shape = operand.shape
  updates_shape = updates.shape
  updates_dtype = _dtype(updates)
  # Pair each element with a slot for its ID along a new leading dimension.
  new_operand = reshape(operand, (1,) + operand_shape)
  new_operand = pad(new_operand, _zero(operand),
                    ((0, 1, 0),) + tuple((0, 0, 0) for _ in operand_shape))
  ids_shape = onp.array(updates_shape)
  ids_shape[dnums.update_window_dims,] = 1
  num_ids = onp.prod(ids_shape)
  # IDs start at 1 so that 0 unambiguously means "not written by any update".
  update_ids = add(reshape(iota(updates_dtype, num_ids), ids_shape),
                   _ones(updates))
  # TODO(phawkins): there is a potential bug here if the number of updates
  # is large enough to overflow the number of mantissa bits in a float so IDs
  # end up colliding. We could also utilize the exponent and sign bits, with a
  # little more work.
  assert num_ids < (2 ** onp.finfo(updates_dtype).nmant)
  updates = reshape(updates, (1,) + updates_shape)
  reshaped_update_ids = reshape(update_ids, (1,) + updates_shape)
  updates_and_ids = concatenate((updates, reshaped_update_ids), 0)
  # Shift every dimension number by one for the new leading pair dimension.
  new_dnums = ScatterDimensionNumbers(
      update_window_dims=(0,) + tuple(d + 1 for d in dnums.update_window_dims),
      inserted_window_dims=tuple(d + 1 for d in dnums.inserted_window_dims),
      scatter_dims_to_operand_dims=tuple(d + 1 for d in dnums.scatter_dims_to_operand_dims))
  outputs = scatter_p.bind(
      new_operand, scatter_indices, updates_and_ids, update_jaxpr=update_jaxpr,
      update_consts=update_consts, dimension_numbers=new_dnums,
      updates_shape=updates_shape)
  val_out = index_in_dim(outputs, 0, keepdims=False)
  scattered_ids = index_in_dim(outputs, 1, keepdims=False)
  # b) compute the inverse gather that "undoes" the scatter on the id values.
  gather_dnums = GatherDimensionNumbers(
      offset_dims=dnums.update_window_dims,
      collapsed_slice_dims=dnums.inserted_window_dims,
      start_index_map=dnums.scatter_dims_to_operand_dims)
  slice_sizes = []
  pos = 0
  for i in xrange(len(scattered_ids.shape)):
    if i in dnums.inserted_window_dims:
      slice_sizes.append(1)
    else:
      slice_sizes.append(updates_shape[dnums.update_window_dims[pos]])
      pos += 1
  gathered_update_ids = gather(scattered_ids, scatter_indices,
                               dimension_numbers=gather_dnums,
                               slice_sizes=slice_sizes)
  # c) mask off input JVP elements that do not correspond to a primal output.
  g_operand = ad.instantiate_zeros(operand, g_operand)
  g_updates = ad.instantiate_zeros(updates, g_updates)
  masked_g_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
                            g_operand, _zeros(g_operand))
  masked_g_updates = select(eq(update_ids, gathered_update_ids),
                            g_updates, _zeros(g_updates))
  # d) perform a scatter-add to compute the tangent output.
  tangent_out = scatter_add(masked_g_operand, scatter_indices, masked_g_updates,
                            dimension_numbers=dnums)
  return val_out, tangent_out
# scatter (general): only a JVP is registered; _scatter_jvp expresses the
# tangent via scatter-add, so no scatter transpose rule is needed.
scatter_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter',
    _scatter_translation_rule)
ad.primitive_jvps[scatter_p] = _scatter_jvp
batching.primitive_batchers[scatter_p] = (
    partial(_scatter_batching_rule, scatter))
def _reduce_shape_rule(operand, init_value, computation, jaxpr, consts, dimensions):
return tuple(onp.delete(operand.shape, dimensions))
def _reduce_translation_rule(c, operand, init_value, computation, jaxpr, consts, dimensions):
  """Lower the generic reduce to the XLA Reduce op."""
  del computation  # The traced jaxpr/consts carry the reducer.
  reducer = _reduction_computation(c, jaxpr, consts, init_value)
  return c.Reduce(operand, init_value, reducer, dimensions)
def _reduce_batch_rule(batched_args, batch_dims, computation, jaxpr, consts, dimensions):
  """Batch a reduce by shifting reduced dims around the batch dimension."""
  operand, init_value = batched_args
  operand_bdim, init_value_bdim = batch_dims
  if init_value_bdim is not None:
    raise NotImplementedError  # loop and stack
  assert operand_bdim is not None
  # Reduced dims at or beyond the batch dim shift right by one.
  shifted_dims = [d + bool(d >= operand_bdim) for d in dimensions]
  # The output batch dim moves left once per reduced dim in front of it.
  out_bdim = operand_bdim - onp.sum(onp.less(dimensions, operand_bdim))
  return reduce(operand, init_value, computation, shifted_dims), out_bdim
def _reduction_computation(c, jaxpr, consts, init_value):
  """Build an XLA computation for a reducer jaxpr of two scalar arguments."""
  scalar_shape = c.GetShape(init_value)
  return xla.jaxpr_computation(jaxpr, consts, (), scalar_shape, scalar_shape)
# reduce (generic, user-supplied monoid): primitive registration.
reduce_p = standard_primitive(_reduce_shape_rule, _input_dtype, 'reduce',
                              _reduce_translation_rule)
# batching.primitive_batchers[reduce_p] = _reduce_batch_rule # TODO(mattjj): test
def _reduce_sum_shape_rule(operand, axes, input_shape):
assert operand.shape == input_shape, ('{} != {}'
.format(operand.shape, input_shape))
return tuple(onp.delete(operand.shape, axes))
def _reduce_sum_translation_rule(c, operand, axes, input_shape):
  """Lower reduce_sum to an XLA Reduce with a scalar add computation."""
  del input_shape  # Not needed for lowering.
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_client.Shape.array_shape(dtype, ())
  add_computation = xla.primitive_computation(add_p, scalar, scalar)
  return c.Reduce(operand, c.Constant(onp.array(0, dtype)), add_computation,
                  axes)
def _reduce_sum_transpose_rule(cotangent, input_shape, axes):
  """Transpose of sum: broadcast the cotangent back over the summed axes."""
  kept_dims = tuple(onp.delete(onp.arange(len(input_shape)), axes))
  result = broadcast_in_dim(cotangent, input_shape, kept_dims)
  assert result.shape == input_shape
  return [result]
# reduce_sum: registration; summation is linear, so deflinear suffices.
reduce_sum_p = standard_primitive(_reduce_sum_shape_rule, _input_dtype,
                                  'reduce_sum', _reduce_sum_translation_rule)
ad.deflinear(reduce_sum_p, _reduce_sum_transpose_rule)
batching.defreducer(reduce_sum_p)
def _reduce_prod_shape_rule(operand, axes):
return tuple(onp.delete(operand.shape, axes))
def _reduce_prod_translation_rule(c, operand, axes):
  """Lower reduce_prod to an XLA Reduce with a scalar multiply computation."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_client.Shape.array_shape(dtype, ())
  mul_computation = xla.primitive_computation(mul_p, scalar, scalar)
  return c.Reduce(operand, c.Constant(onp.array(1, dtype)), mul_computation,
                  axes)
def _reduce_prod_jvp_rule(tangent, operand, axes):
  """JVP of reduce_prod via partial products.

  The derivative of the product with respect to one element is the product
  of all the *other* elements, computed as (product of elements to its left)
  * (product of elements to its right) with two reduce-window passes over
  padded inputs.  This also handles zeros in the operand correctly.
  """
  input_shape = onp.array(operand.shape)
  n = onp.prod(input_shape[list(axes)])
  non_axes = onp.delete(onp.arange(len(input_shape)), axes)
  # Move the reduced axes to the front, and flatten them to 1D.
  permutation = axes + tuple(non_axes)
  new_shape = (n,) + tuple(input_shape[non_axes])
  operand = reshape(operand, new_shape, permutation)
  tangent = reshape(tangent, new_shape, permutation)
  one = _const(operand, 1)
  window_dims = [n] + [1] * len(non_axes)
  window_strides = [1] * (len(non_axes) + 1)
  # Form the partial products of all elements to the left and right of each
  # element.
  # Negative high padding trims the window so position i sees exactly the
  # elements strictly before (resp. after) i, padded with ones.
  left_padding = [(n, -1, 0)] + [(0, 0, 0)] * len(non_axes)
  right_padding = [(-1, n, 0)] + [(0, 0, 0)] * len(non_axes)
  left_products = _reduce_window_prod(pad(operand, one, left_padding),
                                      window_dims, window_strides,
                                      xla_client.PaddingType.VALID)
  right_products = _reduce_window_prod(pad(operand, one, right_padding),
                                       window_dims, window_strides,
                                       xla_client.PaddingType.VALID)
  # Multiply partial products with the tangents and sum.
  return _reduce_sum(mul(tangent, mul(left_products, right_products)), (0,))
# reduce_prod: registration with a custom JVP (products are not linear).
reduce_prod_p = standard_primitive(_reduce_prod_shape_rule, _input_dtype,
                                   'reduce_prod', _reduce_prod_translation_rule)
ad.defjvp(reduce_prod_p, _reduce_prod_jvp_rule)
batching.defreducer(reduce_prod_p)
def _reduce_chooser_shape_rule(operand, axes):
return tuple(onp.delete(operand.shape, axes))
def _reduce_chooser_translation_rule(prim, identity, c, operand, axes):
  """Lower a min/max reduction; `prim` and `identity` are bound via partial."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_client.Shape.array_shape(dtype, ())
  chooser = xla.primitive_computation(prim, scalar, scalar)
  return c.Reduce(operand, c.Constant(identity(dtype)), chooser, axes)
def _reduce_chooser_jvp_rule(g, ans, operand, axes):
  """JVP for min/max reductions: split the tangent evenly among tied elements."""
  # TODO(mattjj): an alternative is to use variadic reduce to compute the chosen
  # locations in a single pass (rather than comparing equality) and use a
  # gather, and/or even push along the chosen elements of g (b/112040122)
  keepdims_shape = [1 if axis in axes else dim
                    for axis, dim in enumerate(operand.shape)]
  location_indicators = convert_element_type(
      _eq_meet(operand, reshape(ans, keepdims_shape)), g.dtype)
  counts = _reduce_sum(location_indicators, axes)
  return div(_reduce_sum(mul(g, location_indicators), axes), counts)
# reduce_max / reduce_min share shape and JVP rules; only the scalar
# primitive and the identity element differ.
_reduce_max_translation_rule = partial(_reduce_chooser_translation_rule, max_p,
                                       _get_max_identity)
reduce_max_p = standard_primitive(_reduce_chooser_shape_rule, _input_dtype,
                                  'reduce_max', _reduce_max_translation_rule)
ad.defjvp2(reduce_max_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_max_p)
_reduce_min_translation_rule = partial(
    _reduce_chooser_translation_rule, min_p, _get_min_identity)
reduce_min_p = standard_primitive(_reduce_chooser_shape_rule, _input_dtype,
                                  'reduce_min', _reduce_min_translation_rule)
ad.defjvp2(reduce_min_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_min_p)
def _reduce_logical_shape_rule(operand, axes):
if operand.dtype != onp.bool_:
msg = "logical reduction requires operand dtype bool, got {}."
raise TypeError(msg.format(operand.dtype))
return tuple(onp.delete(operand.shape, axes))
def _reduce_logical_translation_rule(prim, identity, c, operand, axes):
  """XLA lowering shared by reduce_and/reduce_or: a boolean Reduce whose
  scalar computation is `prim` and whose init value is `identity(bool)`."""
  scalar = xla_client.Shape.array_shape(onp.dtype(onp.bool_), ())
  return c.Reduce(operand, c.Constant(identity(onp.bool_)),
                  xla.primitive_computation(prim, scalar, scalar), axes)
# reduce_or / reduce_and primitives: boolean reductions built from the shared
# logical rules; the max/min identities give the False/True init values.
_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,
                                      or_p, _get_max_identity)
reduce_or_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
                                 'reduce_or', _reduce_or_translation_rule)
batching.defreducer(reduce_or_p)
_reduce_and_translation_rule = partial(_reduce_logical_translation_rule,
                                       and_p, _get_min_identity)
reduce_and_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
                                  'reduce_and', _reduce_and_translation_rule)
batching.defreducer(reduce_and_p)
def _reduce_window_shape_rule(operand, init_value, jaxpr, consts,
                              window_dimensions, window_strides, padding):
  """Shape rule for the generic reduce_window: operand and init dtypes must
  agree; the result shape is the usual windowed-reduction output shape."""
  if operand.dtype == init_value.dtype:
    return _common_reduce_window_shape_rule(operand, window_dimensions,
                                            window_strides, padding)
  msg = ("reduce_window got inconsistent dtypes for operand and init_value: "
         " got operand dtype {} and init_value dtype {}.")
  raise TypeError(msg.format(operand.dtype, init_value.dtype))
def _reduce_window_translation_rule(c, operand, init_value, jaxpr, consts,
                                    window_dimensions, window_strides, padding):
  """XLA lowering for the generic reduce_window: build the scalar reduction
  computation from the jaxpr, then emit a ReduceWindow."""
  xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
  return c.ReduceWindow(operand, init_value, xla_computation, window_dimensions,
                        window_strides, padding)
def _generic_reduce_window_batch_rule(
    batched_args, batch_dims, jaxpr, consts, window_dimensions, window_strides,
    padding):
  """Batching rule for the generic reduce_window primitive.

  Only the operand may be batched; a batched init value is unsupported. The
  actual window bookkeeping is delegated to _reduce_window_batch_rule via a
  closure that re-binds the primitive with the captured jaxpr/consts.
  """
  operand, init = batched_args
  bdim, init_bdim = batch_dims
  if init_bdim is not None:
    raise NotImplementedError("reduce_window batching is not implemented for "
                              "initial values")
  def reduce_window(x, window_dimensions, window_strides, padding):
    return reduce_window_p.bind(
      x, init, jaxpr=jaxpr, consts=consts, window_dimensions=window_dimensions,
      window_strides=window_strides, padding=padding)
  return _reduce_window_batch_rule(reduce_window, (operand,), (bdim,),
                                   window_dimensions, window_strides, padding)
# Generic reduce_window primitive registration (shape rule, lowering, batching).
reduce_window_p = standard_primitive(
    _reduce_window_shape_rule, _input_dtype, 'reduce_window',
    _reduce_window_translation_rule)
batching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule
def _reduce_window_sum_shape_rule(operand, window_dimensions, window_strides,
                                  padding, input_shape):
  """Shape rule for reduce_window_sum; `input_shape` is unused here (it is
  carried as a parameter for the transpose rule)."""
  del input_shape  # only needed by the transpose rule
  return _common_reduce_window_shape_rule(operand, window_dimensions,
                                          window_strides, padding)
def _reduce_window_sum_translation_rule(c, operand, window_dimensions,
                                        window_strides, padding, input_shape):
  """XLA lowering for reduce_window_sum: ReduceWindow with scalar add and a
  zero init value of the operand's dtype."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_client.Shape.array_shape(dtype, ())
  return c.ReduceWindow(operand, c.Constant(onp.array(0, dtype)),
                        xla.primitive_computation(add_p, scalar, scalar),
                        window_dimensions, window_strides, padding)
def _reduce_window_sum_transpose_rule(cotangent, window_dimensions,
                                      window_strides, padding, input_shape):
  """Transpose (VJP) of reduce_window_sum.

  The windowed sum is linear, and its transpose is itself a windowed sum over
  a suitably padded/dilated cotangent. The padding is computed with the same
  helper used for the convolution lhs VJP, since the two operations share the
  same window geometry.
  """
  in_pads = padtype_to_pads(input_shape, window_dimensions, window_strides,
                            padding)
  ones = [1] * len(input_shape)
  pads = _conv_general_vjp_lhs_padding(
      input_shape, window_dimensions, window_strides, cotangent.shape, in_pads,
      ones, ones)
  # Interior padding of (stride - 1) undoes the forward striding.
  padding_config = [(lo, hi, stride - 1)
                    for (lo, hi), stride in zip(pads, window_strides)]
  pad_cotangent = pad(cotangent, _zero(cotangent), padding_config)
  result = _reduce_window_sum(pad_cotangent, window_dimensions, ones,
                              xla_client.PaddingType.VALID)
  assert result.shape == input_shape
  return [result]
def _reduce_window_batch_rule(
reduce_window, batched_args, bdims, window_dimensions, window_strides,
padding, input_shape=None):
operand, = batched_args
bdim, = bdims
if bdim is not None:
window_dimensions = \
window_dimensions[:bdim] + (1,) + window_dimensions[bdim:]
window_strides = window_strides[:bdim] + (1,) + window_strides[bdim:]
operand = reduce_window(
operand, window_dimensions, window_strides, padding)
return operand, bdim
# reduce_window_sum primitive: linear, so autodiff uses deflinear with the
# transpose rule above; batching reuses the shared windowed-reduction rule.
reduce_window_sum_p = standard_primitive(
    _reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',
    _reduce_window_sum_translation_rule)
ad.deflinear(reduce_window_sum_p, _reduce_window_sum_transpose_rule)
batching.primitive_batchers[reduce_window_sum_p] = partial(
    _reduce_window_batch_rule, _reduce_window_sum)
def _reduce_window_chooser_translation_rule(
    prim, identity, c, operand, window_dimensions, window_strides, padding):
  """XLA lowering shared by reduce_window_min/max: ReduceWindow with scalar
  `prim` and init value `identity(dtype)`."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_client.Shape.array_shape(dtype, ())
  return c.ReduceWindow(operand, c.Constant(identity(dtype)),
                        xla.primitive_computation(prim, scalar, scalar),
                        window_dimensions, window_strides, padding)
def _reduce_window_chooser_jvp_rule(prim, g, operand, window_dimensions,
                                    window_strides, padding):
  """JVP shared by reduce_window_min/max: gather the tangents at the chosen
  (min or max) elements of each window."""
  assert prim is max_p or prim is min_p
  if prim is max_p:
    select_prim = ge_p
  else:
    select_prim = le_p
  return _select_and_gather_add(g, operand, select_prim, window_dimensions,
                                window_strides, padding)
def _common_reduce_window_shape_rule(operand, window_dimensions, window_strides,
                                     padding):
  """Validate windowed-reduction arguments, then compute the output shape."""
  _check_shapelike("reduce_window", "window_dimensions", window_dimensions)
  _check_shapelike("reduce_window", "window_strides", window_strides)
  rank = operand.ndim
  if rank != len(window_dimensions):
    msg = ("reduce_window got the wrong number of window_dimensions for "
           "operand: got operand shape {} with window_dimensions {}.")
    raise TypeError(msg.format(operand.shape, window_dimensions))
  if len(window_dimensions) != len(window_strides):
    msg = ("reduce_window got inconsistent window_strides and "
           "window_dimensions: got window_strides {} and window_dimensions {}.")
    raise TypeError(msg.format(window_strides, window_dimensions))
  return reduce_window_shape_tuple(operand.shape, window_dimensions,
                                   window_strides, padding)
def reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides,
                              padding):
  """Output shape of a windowed reduction.

  Each dimension becomes floor((padded_size - window_size) / stride) + 1.
  """
  pads = padtype_to_pads(operand_shape, window_dimensions, window_strides,
                         padding)
  pad_totals = onp.add(*zip(*pads))
  padded = onp.add(operand_shape, pad_totals)
  out_dims = onp.floor_divide(onp.subtract(padded, window_dimensions),
                              window_strides) + 1
  return tuple(out_dims)
# reduce_window_max / reduce_window_min primitives: both use the chooser
# lowering and JVP; batching reuses the shared windowed-reduction rule.
_reduce_window_max_translation_rule = partial(
    _reduce_window_chooser_translation_rule, max_p, _get_max_identity)
reduce_window_max_p = standard_primitive(
    _common_reduce_window_shape_rule, _input_dtype, 'reduce_window_max',
    _reduce_window_max_translation_rule)
ad.defjvp(reduce_window_max_p, partial(_reduce_window_chooser_jvp_rule, max_p))
batching.primitive_batchers[reduce_window_max_p] = partial(
    _reduce_window_batch_rule, _reduce_window_max)
_reduce_window_min_translation_rule = partial(
    _reduce_window_chooser_translation_rule, min_p, _get_min_identity)
reduce_window_min_p = standard_primitive(
    _common_reduce_window_shape_rule, _input_dtype, 'reduce_window_min',
    _reduce_window_min_translation_rule)
ad.defjvp(reduce_window_min_p, partial(_reduce_window_chooser_jvp_rule, min_p))
_reduce_window_min_batch_rule = partial(_reduce_window_batch_rule,
                                        _reduce_window_min)
batching.primitive_batchers[reduce_window_min_p] = partial(
    _reduce_window_batch_rule, _reduce_window_min)
def _select_and_scatter_shape_rule(
    operand, source, init_value, select_jaxpr, select_consts, scatter_jaxpr,
    scatter_consts, window_dimensions, window_strides, padding):
  """Shape rule: select_and_scatter scatters back into the operand's shape."""
  _check_shapelike("select_and_scatter", "window_dimensions", window_dimensions)
  _check_shapelike("select_and_scatter", "window_strides", window_strides)
  if len(window_dimensions) == len(window_strides):
    return operand.shape
  msg = ("select_and_scatter got inconsistent window_strides and "
         "window_dimensions: got window_strides {} and window_dimensions {}.")
  raise TypeError(msg.format(window_strides, window_dimensions))
def _select_and_scatter_translation(
    c, operand, source, init_value, select_jaxpr, select_consts, scatter_jaxpr,
    scatter_consts, window_dimensions, window_strides, padding):
  """XLA lowering for select_and_scatter: build scalar select and scatter
  computations from their jaxprs and emit a SelectAndScatter."""
  select = _reduction_computation(c, select_jaxpr, select_consts, init_value)
  scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)
  return c.SelectAndScatter(operand, select, window_dimensions, window_strides,
                            padding, source, init_value, scatter)
# Generic select_and_scatter primitive registration (no autodiff/batching
# rules are registered for the generic form).
select_and_scatter_p = standard_primitive(
    _select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',
    _select_and_scatter_translation)
def _select_and_scatter_add_shape_rule(
source, operand, select_prim, window_dimensions, window_strides, padding):
return operand.shape
def _select_and_scatter_add_translation(
    c, source, operand, select_prim, window_dimensions, window_strides,
    padding):
  """XLA lowering for select_and_scatter_add: SelectAndScatter whose select
  is `select_prim` (ge/le) and whose scatter is scalar addition from zero."""
  dtype = c.GetShape(operand).numpy_dtype()
  scalar = xla_client.Shape.array_shape(dtype, ())
  select = xla.primitive_computation(select_prim, scalar, scalar)
  scatter = xla.primitive_computation(add_p, scalar, scalar)
  zero = c.Constant(onp.array(0, dtype))
  return c.SelectAndScatter(operand, select, window_dimensions, window_strides,
                            padding, source, zero, scatter)
def _select_and_scatter_add_jvp(
    primals, tangents, select_prim, window_dimensions, window_strides,
    padding):
  """JVP for select_and_scatter_add.

  The output is linear in `source` (given the selection determined by
  `operand`), so the tangent is the same scatter applied to the source
  tangent; the operand tangent is discarded.
  """
  source, operand = primals
  g_source, g_operand = tangents
  val_out = _select_and_scatter_add(
      source, operand, select_prim, window_dimensions, window_strides,
      padding)
  del g_operand
  if g_source is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    tangent_out = _select_and_scatter_add(
        g_source, operand, select_prim, window_dimensions,
        window_strides, padding)
  return val_out, tangent_out
def _select_and_scatter_add_transpose(
    t, source, operand, select_prim, window_dimensions, window_strides,
    padding):
  """Transpose of select_and_scatter_add: the adjoint of scattering into the
  selected locations is gathering from them (select_and_gather_add)."""
  # `source is None` marks it as the linear (undefined) primal being
  # transposed; `operand` must be a known value since it drives selection.
  assert source is None and operand is not None
  source_t = _select_and_gather_add(t, operand, select_prim, window_dimensions,
                                    window_strides, padding)
  return [source_t, None]
def _select_and_scatter_add_batch_rule(batched_args, batch_dims, **kwargs):
  """Batching rule for select_and_scatter_add.

  Moves the batch dimension(s) to the front and unrolls over them, applying
  the unbatched primitive per slice and stacking the results on axis 0.

  Bug fix: the original three-branch unrolling had no final `else`, so a call
  with both batch dims None silently returned None; it also duplicated the
  unroll/reshape/concatenate logic three times. Both issues are fixed here.
  """
  # TODO(#212): use a map construct instead of unrolling.
  source, operand = batched_args
  s_bdims, o_bdims = batch_dims
  if s_bdims is None and o_bdims is None:
    raise ValueError("select_and_scatter_add batch rule requires at least one "
                     "batched argument")
  if s_bdims is not None:
    source = batching.move_dim_to_front(source, s_bdims)
  if o_bdims is not None:
    operand = batching.move_dim_to_front(operand, o_bdims)
  # Pair each batch slice of the batched argument(s) with the (shared or
  # per-slice) other argument.
  if s_bdims is not None and o_bdims is not None:
    pairs = zip(source, operand)
  elif s_bdims is not None:
    pairs = ((s, operand) for s in source)
  else:
    pairs = ((source, o) for o in operand)
  outputs = [_select_and_scatter_add(s, o, **kwargs) for s, o in pairs]
  outputs = [reshape(out, (1,) + out.shape) for out in outputs]
  return concatenate(outputs, 0), 0
# select_and_scatter_add primitive registration: shape rule, lowering, and the
# JVP/transpose/batching rules defined above.
select_and_scatter_add_p = standard_primitive(
    _select_and_scatter_add_shape_rule, _input_dtype, 'select_and_scatter_add',
    _select_and_scatter_add_translation)
ad.primitive_transposes[select_and_scatter_add_p] = \
    _select_and_scatter_add_transpose
ad.primitive_jvps[select_and_scatter_add_p] = _select_and_scatter_add_jvp
batching.primitive_batchers[select_and_scatter_add_p] = \
    _select_and_scatter_add_batch_rule
def _select_and_gather_add_shape_rule(
tangents, operand, select_prim, window_dimensions, window_strides, padding):
if tangents.shape != operand.shape:
msg = ("select_and_gather_add tangents and operand shapes must match, "
"got {} and {}.")
raise TypeError(msg.format(tangents.shape, operand.shape))
return _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding)
# Unsigned integer dtype of a given bit width, used by the bit-packing tricks
# in _select_and_gather_add_translation below.
_UINT_DTYPES = {
  16: onp.uint16,
  32: onp.uint32,
  64: onp.uint64,
}
def _select_and_gather_add_translation(
    c, tangents, operand, select_prim, window_dimensions, window_strides,
    padding, max_bits=64):
  """XLA lowering for select_and_gather_add.

  Conceptually this is a windowed reduction over (operand, tangent) pairs
  that selects by operand (min or max) and returns the paired tangent. Since
  ReduceWindow on tuples is unavailable, the pair is packed into a single
  unsigned integer: either a double-width word (exact) or, when `max_bits`
  forbids that, one word holding two reduced-precision halves (lossy).
  """
  shape = c.GetShape(operand)
  dtype = shape.numpy_dtype()
  etype = shape.xla_element_type()
  nbits = onp.finfo(dtype).bits
  assert nbits <= max_bits
  double_word_reduction = nbits * 2 <= max_bits
  # Raw-bits constant builder; canonicalize_types=False keeps the exact dtype.
  const = lambda c, dtype, x: c.Constant(onp.array(x, dtype=dtype),
                                         canonicalize_types=False)
  if double_word_reduction:
    # XLA doesn't yet implement ReduceWindow on tuples (Google bug b/73062247), so
    # we implement a pair-wise ReduceWindow by packing two k-bit values into
    # 2k-bit unsigned integer using bit tricks.
    word_dtype = _UINT_DTYPES[nbits]
    double_word_dtype = _UINT_DTYPES[nbits * 2]
    word_type = xla_client.dtype_to_etype(word_dtype)
    double_word_type = xla_client.dtype_to_etype(double_word_dtype)
    # Packs two values into a tuple.
    def pack(a, b):
      a = c.BitcastConvertType(a, word_type)
      b = c.BitcastConvertType(b, word_type)
      a = c.ConvertElementType(a, double_word_type)
      b = c.ConvertElementType(b, double_word_type)
      a = c.ShiftLeft(a, const(c, double_word_dtype, nbits))
      return c.Or(a, b)
    # Unpacks the first element of a tuple.
    def fst(c, t):
      st = c.ShiftRightLogical(t, const(c, double_word_dtype, nbits))
      return c.BitcastConvertType(c.ConvertElementType(st, word_type), etype)
    # Unpacks the second element of a tuple.
    def snd(t):
      return c.BitcastConvertType(c.ConvertElementType(t, word_type), etype)
  else:
    # The double-word trick above only works if we have a sufficiently large
    # type. As an alternative, we can pack two half words into a single word,
    # at the cost of precision.
    # TODO(b/73062247): add support for tuple reductions and remove this case.
    warnings.warn("Using reduced precision for gradient of reduce-window "
                  "min/max operator to work around missing XLA support for "
                  "pair-reductions. This is likely from a second or "
                  "higher derivative of a max-pooling operation.")
    r_nbits = nbits // 2
    # Drop/round the bottom mantissa bits.
    nexp = onp.finfo(dtype).nexp
    nmant = r_nbits - nexp - 1
    double_word_dtype = word_dtype = _UINT_DTYPES[nbits]
    word_type = xla_client.dtype_to_etype(word_dtype)
    # Packs two values into a tuple.
    def pack(a, b):
      a = c.ReducePrecision(a, exponent_bits=nexp, mantissa_bits=nmant)
      b = c.ReducePrecision(b, exponent_bits=nexp, mantissa_bits=nmant)
      a = c.BitcastConvertType(a, word_type)
      b = c.BitcastConvertType(b, word_type)
      b = c.ShiftRightLogical(b, const(c, word_dtype, r_nbits))
      return c.Or(a, b)
    # Unpacks the first element of a tuple.
    def fst(c, t):
      st = c.And(t, const(c, word_dtype, ((1 << r_nbits) - 1) << r_nbits))
      return c.BitcastConvertType(st, etype)
    # Unpacks the second element of a tuple.
    def snd(t):
      return c.BitcastConvertType(c.ShiftLeft(t, const(c, word_dtype, r_nbits)),
                                  etype)
  # Scalar reducer over packed pairs: compare by the first (operand) half.
  def reducer():
    c = xla_bridge.make_computation_builder("select_and_gather_pair_reducer")
    x = c.ParameterWithShape(
      xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))
    y = c.ParameterWithShape(
      xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))
    assert select_prim is ge_p or select_prim is le_p
    which = c.Ge if select_prim is ge_p else c.Le
    c.Select(which(fst(c, x), fst(c, y)), x, y)
    return c.Build()
  assert select_prim is ge_p or select_prim is le_p
  # Init with +/-inf in the operand half so any real element wins.
  init = -onp.inf if select_prim is ge_p else onp.inf
  out = c.ReduceWindow(pack(operand, tangents),
                       pack(const(c, dtype, init), const(c, dtype, 0)),
                       reducer(), window_dimensions, window_strides,
                       padding)
  return snd(out)
def _select_and_gather_add_jvp(
    primals, tangents, select_prim, window_dimensions, window_strides,
    padding):
  """JVP for select_and_gather_add: linear in the first argument, so the
  tangent is the same gather applied to that argument's tangent."""
  # NOTE(review): the first primal is named `source` here but is the
  # `tangents` input of the primitive — naming kept as-is.
  source, operand = primals
  g_source, g_operand = tangents
  val_out = _select_and_gather_add(
      source, operand, select_prim, window_dimensions, window_strides,
      padding)
  del g_operand
  if g_source is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    tangent_out = _select_and_gather_add(
        g_source, operand, select_prim, window_dimensions,
        window_strides, padding)
  return val_out, tangent_out
def _select_and_gather_add_transpose(
    t, tangents, operand, select_prim, window_dimensions, window_strides,
    padding):
  """Transpose of select_and_gather_add: the adjoint of gathering from the
  selected locations is scattering into them (select_and_scatter_add)."""
  # `tangents is None` marks it as the linear primal being transposed.
  assert tangents is None and operand is not None
  result = _select_and_scatter_add(t, operand, select_prim, window_dimensions,
                                   window_strides, padding)
  return [result, None]
# select_and_gather_add primitive registration; on TPU the lowering is limited
# to 32-bit packing (max_bits=32), so the reduced-precision path is used for
# wide dtypes there.
select_and_gather_add_p = standard_primitive(
    _select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add',
    _select_and_gather_add_translation)
ad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp
ad.primitive_transposes[select_and_gather_add_p] = \
    _select_and_gather_add_transpose
xla.backend_specific_translations['tpu'][select_and_gather_add_p] = partial(
    _select_and_gather_add_translation,
    max_bits=32)
# Sorting preserves the operand's shape.
sort_shape = lambda operand, dimension: operand.shape
def _sort_jvp_rule(g, operand, dimension):
  """JVP for sort: apply the same permutation (sort by the primal keys) to the
  tangent values."""
  _, g_out = sort_key_val(operand, g, dimension)
  return g_out
def _sort_batch_rule(batched_args, batch_dims, dimension):
  """Batching rule for sort: shift `dimension` past the batch axis when the
  batch axis comes at or before it."""
  operand, = batched_args
  bdim, = batch_dims
  # Canonicalize against the logical (unbatched) rank first.
  canonical = dimension % (operand.ndim - 1)
  shifted = canonical + 1 if bdim <= canonical else canonical
  return sort(operand, dimension=shifted), bdim
# sort primitive registration (shape rule, JVP, batching).
sort_p = standard_primitive(sort_shape, _input_dtype, 'sort')
ad.defjvp(sort_p, _sort_jvp_rule)
batching.primitive_batchers[sort_p] = _sort_batch_rule
def _sort_key_val_abstract_eval(keys, values, dimension):
  """sort_key_val returns a (sorted_keys, sorted_values) tuple with the input
  abstract values unchanged."""
  return core.AbstractTuple((keys, values))
def _sort_key_val_impl(keys, values, dimension):
  """Eager implementation: dispatch to XLA, then repack the pair."""
  out = xla.apply_primitive(sort_key_val_p, keys, values, dimension=dimension)
  sorted_keys, sorted_values = out
  return core.pack((sorted_keys, sorted_values))
def _sort_key_val_jvp(primals, tangents, dimension):
  """JVP for sort_key_val: permute each tangent by the key sort order."""
  # NOTE(mattjj): this re-sorts three times, but if we had a variadic
  # sort_key_val, or if we could apply a fixed permutation efficiently, we could
  # implement this jvp rule with a single sort. The apply_permutation primitive
  # would make the jvp (and corresponding transpose rule) faster and easier.
  # This would also be cleaner if we didn't get the sorted keys out.
  # TODO(mattjj): make sort_key_val variadic, no sorted keys out by default
  keys, values = primals
  keys_tangents, values_tangents = tangents
  val_out = sort_key_val(keys, values, dimension)
  if keys_tangents is ad_util.zero:
    keys_tangents_out = ad_util.zero
  else:
    keys_tangents_out = _sort_jvp_rule(keys_tangents, keys, dimension)
  if values_tangents is ad_util.zero:
    values_tangents_out = ad_util.zero
  else:
    values_tangents_out = _sort_jvp_rule(values_tangents, keys, dimension)
  tangents_out = keys_tangents_out, values_tangents_out
  return core.pack(val_out), ad.TangentTuple(tangents_out)
def _sort_key_val_transpose_rule(t, keys, values, dimension):
  """Transpose rule for sort_key_val (linear in `values` for fixed `keys`).

  Recovers the permutation applied by the forward sort by sorting an iota
  along the same dimension, then inverts it by sorting the value cotangents
  by that permutation.

  Bug fix: `dimension` was not forwarded to the two inner sort_key_val calls,
  so a sort along any non-default dimension was inverted along the wrong axis.
  """
  t_keys, t_values = t
  assert t_keys is ad_util.zero
  iota = broadcasted_iota(onp.int32, keys.shape, dimension % keys.ndim)
  _, perm = sort_key_val(keys, iota, dimension)
  keys_result = ad_util.zero if keys is None else None
  values_result = (sort_key_val(perm, t_values, dimension)[1]
                   if values is None else None)
  return [keys_result, values_result]
def _sort_key_val_batch_rule(batched_args, batch_dims, dimension):
  """Batching rule for sort_key_val.

  Aligns the batch dimensions of keys and values (by moving an axis or
  broadcasting an unbatched argument), then sorts along the shifted
  dimension.
  """
  keys, values = batched_args
  keys_bdim, values_bdim = batch_dims
  assert keys_bdim is not None or values_bdim is not None
  if keys_bdim == values_bdim:
    # Batch dims already aligned: just shift the sort dimension past it.
    new_dimension = dimension + (keys_bdim <= dimension)
    out = sort_key_val(keys, values, new_dimension)
    return core.pack(out), keys_bdim
  elif keys_bdim is not None and values_bdim is not None:
    # Both batched but on different axes: move the keys' batch dim to match.
    # NOTE(review): argument order here assumes the old batching.moveaxis
    # signature (size, dst, src, x) — confirm against the batching module.
    keys_trans = batching.moveaxis(keys.shape[keys_bdim], values_bdim,
                                   keys_bdim, keys)
    new_dimension = dimension + (values_bdim <= dimension)
    out = sort_key_val(keys_trans, values, new_dimension)
    return core.pack(out), values_bdim
  elif keys_bdim is None:
    # Keys unbatched: broadcast them across the values' batch dimension.
    broadcast_dimensions = onp.delete(onp.arange(values.ndim), values_bdim)
    new_keys = broadcast_in_dim(keys, values.shape, broadcast_dimensions)
    new_dimension = dimension + (values_bdim <= dimension)
    out = sort_key_val(new_keys, values, new_dimension)
    return core.pack(out), values_bdim
  elif values_bdim is None:
    # Values unbatched: broadcast them across the keys' batch dimension.
    broadcast_dimensions = onp.delete(onp.arange(keys.ndim), keys_bdim)
    new_values = broadcast_in_dim(values, keys.shape, broadcast_dimensions)
    new_dimension = dimension + (keys_bdim <= dimension)
    out = sort_key_val(keys, new_values, new_dimension)
    return core.pack(out), keys_bdim
  else:
    raise Exception  # unreachable
# sort_key_val primitive registration: custom impl/abstract-eval because it
# returns a tuple; standard lowering plus JVP/transpose/batching rules.
sort_key_val_p = Primitive('sort_key_val')
sort_key_val_p.def_impl(_sort_key_val_impl)
sort_key_val_p.def_abstract_eval(_sort_key_val_abstract_eval)
xla.translations[sort_key_val_p] = partial(standard_translate, 'sort_key_val')
ad.primitive_jvps[sort_key_val_p] = _sort_key_val_jvp
ad.primitive_transposes[sort_key_val_p] = _sort_key_val_transpose_rule
batching.primitive_batchers[sort_key_val_p] = _sort_key_val_batch_rule
def _tie_in_transpose_rule(t):
  """Transpose of tie_in: no cotangent flows to x; t flows straight to y."""
  return [ad_util.zero, t]
def _tie_in_batch_rule(batched_args, batch_dims):
  """Batching rule for tie_in: the output carries y's batch dimension."""
  x, y = batched_args
  _, y_bdim = batch_dims
  return tie_in(x, y), y_bdim
# tie_in: semantically the identity on its second argument (the first only
# creates a data dependence); shaped_identity: identity carrying an explicit
# shape parameter.
tie_in_p = Primitive('tie_in')
tie_in_p.def_impl(lambda x, y: y)
tie_in_p.def_abstract_eval(lambda x, y: y)
xla.translations[tie_in_p] = lambda c, x, y: y
ad.deflinear(tie_in_p, _tie_in_transpose_rule)
batching.primitive_batchers[tie_in_p] = _tie_in_batch_rule
shaped_identity_p = Primitive('shape_id')
shaped_identity_p.def_impl(lambda x, shape: x)
shaped_identity_p.def_abstract_eval(lambda x, shape: x)
xla.translations[shaped_identity_p] = lambda c, x, shape: x
ad.deflinear(shaped_identity_p, lambda t, shape: [shaped_identity(t)])
batching.primitive_batchers[shaped_identity_p] = \
    lambda a, d, shape: (shaped_identity(a[0]), d[0])
### constants
class _FilledConstant(xla.DeviceConstant):
  """Lazy device constant representing an array filled with a single value.

  The full array is only materialized on host access (`_value`); the XLA
  constant handler lowers it as a scalar constant plus a Broadcast.
  """
  __slots__ = ["fill_value"]
  def __init__(self, fill_value, shape):
    assert type(fill_value) is onp.ndarray
    self.shape = shape
    self.dtype = _dtype(fill_value)
    self.ndim = len(shape)
    self.size = prod(shape)
    self._npy_value = None
    self.fill_value = fill_value
  @property
  def _value(self):
    # Materialize the concrete numpy array on demand.
    return onp.full(self.shape, self.fill_value)
  @staticmethod
  def constant_handler(c, filled_const, canonicalize_types=True):
    return c.Broadcast(
      c.NumpyArrayConstant(filled_const.fill_value, canonicalize_types),
      filled_const.shape)
class _IotaConstant(xla.DeviceConstant):
  """Lazy device constant for an iota (0, 1, 2, ...) along one axis,
  broadcast to the full shape.

  Lowered to XLA as a BroadcastedIota, avoiding host materialization.
  """
  __slots__ = ["axis"]
  def __init__(self, dtype, shape, axis):
    self.shape = shape
    self.dtype = onp.dtype(dtype)
    self.ndim = len(shape)
    self.size = prod(shape)
    self._npy_value = None
    self.axis = axis
  @property
  def _value(self):
    # Materialize and cache the concrete numpy array on demand.
    if self._npy_value is None:
      iota = onp.arange(self.shape[self.axis], dtype=self.dtype)
      iota = iota.reshape([self.shape[self.axis] if i == self.axis else 1
                           for i in range(self.ndim)])
      self._npy_value = onp.broadcast_to(iota, self.shape)
    return self._npy_value
  @staticmethod
  def constant_handler(c, iota_constant, canonicalize_types=True):
    dtype = iota_constant.dtype
    if canonicalize_types:
      dtype = xla_bridge.canonicalize_dtype(dtype)
    return c.BroadcastedIota(dtype, iota_constant.shape, iota_constant.axis)
class _EyeConstant(xla.DeviceConstant):
  """Lazy device constant for a (generalized) identity: 1 where the indices
  along all of `axes` coincide, 0 elsewhere, broadcast to `shape`.

  Lowered to XLA as equality of broadcast iotas, avoiding host
  materialization.
  """
  __slots__ = ["axes"]
  def __init__(self, shape, axes, dtype):
    self.shape = shape
    self.dtype = onp.dtype(dtype)
    self.ndim = len(shape)
    self.size = prod(shape)
    self._npy_value = None
    self.axes = axes
  @property
  def _value(self):
    # Materialize and cache the concrete numpy array on demand.
    if self._npy_value is None:
      ones = [1] * self.ndim
      iotas = [onp.arange(self.shape[axis]).reshape(subvals(ones, [(axis, -1)]))
               for axis in self.axes]
      eyes = [i1 == i2 for i1, i2 in zip(iotas[:-1], iotas[1:])]
      result = onp.asarray(_reduce(operator.and_, eyes), self.dtype)
      self._npy_value = onp.broadcast_to(result, self.shape)
    return self._npy_value
  @staticmethod
  def constant_handler(c, diag_const, canonicalize_types=True):
    # Bug fix: a stray unconditional reassignment of `etype` after the
    # branch previously clobbered the `canonicalize_types=False` case, so
    # the flag had no effect.
    if canonicalize_types:
      etype = xla_bridge.dtype_to_etype(diag_const.dtype)
    else:
      etype = xla_client.dtype_to_etype(diag_const.dtype)
    iotas = [c.BroadcastedIota(onp.uint32, diag_const.shape, axis)
             for axis in diag_const.axes]
    eyes = [c.Eq(i1, i2) for i1, i2 in zip(iotas[:-1], iotas[1:])]
    return c.ConvertElementType(_reduce(c.And, eyes), etype)
# Register the lazy device constants with the constant-handling, abstract
# evaluation, batching, and autodiff machinery.
for _t in [_FilledConstant, _IotaConstant, _EyeConstant]:
  xla_bridge.register_constant_handler(_t, _t.constant_handler)
  core.pytype_aval_mappings[_t] = ConcreteArray
  xla.pytype_aval_mappings[_t] = xla.pytype_aval_mappings[xla.DeviceArray]
  xla.canonicalize_dtype_handlers[_t] = _identity
  batching.pytype_aval_mappings[_t] = make_shaped_array
  ad_util.jaxval_adders[_t] = add
  ad_util.jaxval_zeros_likers[_t] = zeros_like_array
### stop-gradient
def _stop_gradient_jvp_rule(primals, tangents):
  """JVP for stop_gradient: forward the primal, zero out the tangent."""
  # if we don't call stop_gradient here, we'd only peel off one autodiff tracer
  x, = primals
  return stop_gradient(x), ad_util.zero
def _stop_gradient_batch_rule(batched_args, batch_dims):
  """Batching rule for stop_gradient: apply elementwise, keep the batch dim."""
  x, = batched_args
  dim, = batch_dims
  return stop_gradient(x), dim
# stop_gradient primitive: identity at runtime; only its autodiff rule differs.
stop_gradient_p = Primitive('stop_gradient')
stop_gradient_p.def_impl(_identity)
stop_gradient_p.def_abstract_eval(_identity)
xla.translations[stop_gradient_p] = lambda c, x: x
ad.primitive_jvps[stop_gradient_p] = _stop_gradient_jvp_rule
batching.primitive_batchers[stop_gradient_p] = _stop_gradient_batch_rule
### util
def _ndim(x):
  """Return the number of dimensions (rank) of `x`."""
  return x.ndim
def _dilate_shape(shape, dilation):
"""Utility function for computing the shape resulting from a dilation."""
if not onp.all(onp.greater(dilation, 0)):
msg = "All dilations must be positive, got {}."
raise TypeError(msg.format(dilation))
dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
return onp.multiply(dilation, onp.subtract(shape, 1)) + 1
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
  """Convert padding string to list of pairs of pad values."""
  PaddingType = xla_client.PaddingType
  if isinstance(padding, str):
    mapping = {'VALID': PaddingType.VALID, 'SAME': PaddingType.SAME}
    try:
      padding = mapping[padding.upper()]
    except KeyError:
      msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
      raise RuntimeError(msg.format(padding))
  if padding == PaddingType.SAME:
    # SAME keeps ceil(in / stride) outputs per dimension; split the total
    # padding as evenly as possible, extra on the high side.
    out_shape = onp.ceil(onp.true_divide(in_shape, window_strides)).astype(int)
    pads = []
    for out_size, stride, w, in_size in zip(out_shape, window_strides,
                                            window_shape, in_shape):
      total = _max((out_size - 1) * stride + w - in_size, 0)
      pads.append((total // 2, total - total // 2))
    return pads
  elif padding == PaddingType.VALID:
    return [(0, 0)] * len(in_shape)
  else:
    msg = "Unknown padding type: {}."
    raise TypeError(msg.format(padding))
def _check_same_dtypes(name, ignore_fp_precision, *dtypes):
  """Raise a TypeError unless all `dtypes` agree.

  With `ignore_fp_precision`, all floating dtypes are treated as one family
  and all complex dtypes as another: XLA shape inference tolerates mixed
  float precision even though the HLO verifier often rejects it.
  """
  def coarsen(dtype):
    if onp.issubdtype(dtype, onp.floating):
      return onp.floating
    if onp.issubdtype(dtype, onp.complexfloating):
      return onp.complexfloating
    return dtype
  dtypes = [onp.dtype(d) for d in dtypes]  # canonicalize
  if ignore_fp_precision:
    dtypes = [coarsen(d) for d in dtypes]
  if len({xla_bridge.canonicalize_dtype(t) for t in dtypes}) != 1:
    if ignore_fp_precision:
      msg = ("{} requires arguments to have same dtypes up to floating point "
             "precision, got {}.")
    else:
      msg = "{} requires arguments to have the same dtypes, got {}."
    raise TypeError(msg.format(name, ", ".join(map(str, dtypes))))
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
  """Check that conv shapes are valid and are consistent with window_strides."""
  lhs_rank, rhs_rank = len(lhs_shape), len(rhs_shape)
  if lhs_rank != rhs_rank:
    msg = "Arguments to {} must have same rank, got {} and {}."
    raise TypeError(msg.format(name, lhs_rank, rhs_rank))
  if lhs_rank < 2:
    msg = "Arguments to {} must have rank at least 2, got {} and {}."
    raise TypeError(msg.format(name, lhs_rank, rhs_rank))
  if lhs_shape[1] != rhs_shape[1]:
    msg = "Arguments to {} must agree on input feature size, got {} and {}."
    raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
  _check_shapelike(name, "window_strides", window_strides)
  if not onp.all(onp.greater(window_strides, 0)):
    msg = "All elements of window_strides must be positive, got {}."
    raise TypeError(msg.format(window_strides))
  expected_length = lhs_rank - 2
  if len(window_strides) != expected_length:
    msg = "{} window_strides has wrong length: expected {}, got {}."
    raise TypeError(msg.format(name, expected_length, len(window_strides)))
def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads):
  """Compute the shape tuple of a conv given input shapes in canonical order."""
  num_spatial = len(lhs_shape) - 2
  if isinstance(pads, str):
    pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
  if len(pads) != num_spatial:
    msg = "Wrong number of explicit pads for convolution: expected {}, got {}."
    raise TypeError(msg.format(num_spatial, len(pads)))
  pad_totals = onp.add(*zip(*pads))
  padded_spatial = onp.add(lhs_shape[2:], pad_totals)
  spatial_out = onp.floor_divide(
      onp.subtract(padded_spatial, rhs_shape[2:]), strides) + 1
  spatial_out = onp.maximum(0, spatial_out)
  return tuple((lhs_shape[0], rhs_shape[0]) + tuple(spatial_out))
def conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
                             dimension_numbers):
  """Conv output shape for arbitrary dimension_numbers: permute the inputs to
  canonical order, compute the shape there, and permute the result back."""
  lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
  canonical_lhs = onp.take(lhs_shape, lhs_perm)
  canonical_rhs = onp.take(rhs_shape, rhs_perm)
  canonical_out = conv_shape_tuple(canonical_lhs, canonical_rhs,
                                   window_strides, padding)
  return tuple(onp.take(canonical_out, onp.argsort(out_perm)))
def conv_transpose_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
                               dimension_numbers):
  """Output shape of a transposed convolution.

  Inputs are permuted to canonical (batch, feature, spatial...) order, the
  fractionally-strided output size is computed there, and the result is
  permuted back to the caller's layout.
  """
  lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
  lhs_trans = onp.take(lhs_shape, lhs_perm)
  rhs_trans = onp.take(rhs_shape, rhs_perm)
  if isinstance(padding, str):
    padding = [_conv_transpose_padding(k, s, padding)
               for k,s in zip(rhs_trans[2:], window_strides)]
  # Total (lo + hi) padding per spatial dimension.
  padding = list(map(onp.sum, padding))
  # Unpadded transposed-conv size: stride-dilated input minus kernel overlap.
  unpad_out_space = [(i-1) * s - k + 2
                     for i, k, s in zip(lhs_trans[2:],
                                        rhs_trans[2:],
                                        window_strides)]
  out_space = onp.sum([unpad_out_space, padding], axis=0).tolist()
  out_trans = tuple((lhs_trans[0], rhs_trans[0]) + tuple(out_space))
  return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def _check_shapelike(fun_name, arg_name, obj):
"""Check that `obj` is a shape-like value (e.g. tuple of nonnegative ints)."""
if not isinstance(obj, (tuple, list, onp.ndarray)):
msg = "{} {} must be of type tuple/list/ndarray, got {}."
raise TypeError(msg.format(fun_name, arg_name, type(obj)))
# bool(obj) for an ndarray raises an error, so we check len
if not len(obj): # pylint: disable=g-explicit-length-test
return
obj_arr = onp.array(obj)
if obj_arr.ndim != 1:
msg = "{} {} must be rank 1, got {}."
raise TypeError(msg.format(obj_arr.ndim))
if not onp.issubdtype(obj_arr.dtype, onp.integer):
msg = "{} {} must have every element be an integer type, got {}."
raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj))))
if not (obj_arr >= 0).all():
msg = "{} {} must have every element be nonnegative, got {}."
raise TypeError(msg.format(fun_name, arg_name, obj))
def _dynamic_slice_indices(operand, start_indices):
  """Pack and wrap dynamic-slice start indices.

  A tuple/list of scalar indices is concatenated into a single 1-D array;
  indices are then wrapped modulo the operand's dimension sizes.
  """
  if isinstance(start_indices, (tuple, list)):
    start_indices = concatenate([reshape(i, [1]) for i in start_indices], 0)
  # map int over operand.shape to raise any dynamic-shape errors
  shape = onp.asarray(list(map(int, operand.shape)), start_indices.dtype)
  return rem(start_indices, shape)
def _const(example, val):
  """Return `val` as a numpy scalar/array with the same dtype as `example`."""
  return onp.array(val, _dtype(example))
# Convenience constructors: arrays of 0/1/2 shaped like an example (`_zeros`,
# `_ones`, `_twos`) or as scalars (`_zero`, `_one`, `_two`), plus dtype helpers.
_zeros = partial(full_like, fill_value=0)
_zero = partial(full_like, shape=(), fill_value=0)
_ones = partial(full_like, fill_value=1)
_one = partial(full_like, shape=(), fill_value=1)
_twos = partial(full_like, fill_value=2)
_two = partial(full_like, shape=(), fill_value=2)
_dtype = dtype = onp.result_type
_iscomplex = lambda x: onp.issubdtype(_dtype(x), onp.complexfloating)
def ranges_like(*xs):
  """Yield consecutive `range`s sized like each of `xs`.

  E.g. ranges_like([a, b], [c], [d, e, f]) yields range(0, 2), range(2, 3),
  range(3, 6).
  """
  offset = 0
  for seq in xs:
    end = offset + len(seq)
    yield range(offset, end)
    offset = end
def remaining(original, *removed_lists):
  """Elements of `original` not present in any `removed_lists`, in order."""
  removed = set()
  for lst in removed_lists:
    removed.update(lst)
  return [item for item in original if item not in removed]
def _canonicalize_precision(precision):
if precision is None:
return None
if isinstance(precision, Precision):
return precision
else:
msg = "Precision argument must be None or a lax.Precision value; got {}"
raise ValueError(msg.format(precision))
# lhs_spec and out_spec are lists containing
# [batch dim, feature dim, spatial dims ...]
# rhs_spec is a list containing:
# [out feature dim, in feature dim, spatial dims ...]
class ConvDimensionNumbers(collections.namedtuple(
    "ConvDimensionNumbers", ["lhs_spec", "rhs_spec", "out_spec"])):
  """Describes batch, spatial, and feature dimensions of a convolution.

  A namedtuple of three integer tuples, one per convolution operand.

  Args:
    lhs_spec: a tuple of nonnegative integer dimension numbers containing
      `(batch dimension, feature dimension, spatial dimensions...)`.
    rhs_spec: a tuple of nonnegative integer dimension numbers containing
      `(out feature dimension, in feature dimension, spatial dimensions...)`.
    out_spec: a tuple of nonnegative integer dimension numbers containing
      `(batch dimension, feature dimension, spatial dimensions...)`.
  """
def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers):
  """Converts convolution `dimension_numbers` to a `ConvDimensionNumbers`.

  Args:
    lhs_shape: tuple of nonnegative integers, shape of the convolution input.
    rhs_shape: tuple of nonnegative integers, shape of the convolution kernel.
    dimension_numbers: None or a tuple/list of strings, following the
      convolution dimension number specification format in xla_client.py.

  Returns:
    A `ConvDimensionNumbers` object that represents `dimension_numbers` in the
    canonical form used by lax functions.
  """
  ndim = len(lhs_shape)
  if ndim != len(rhs_shape):
    msg = "convolution requires lhs and rhs ndim to be equal, got {} and {}."
    raise TypeError(msg.format(ndim, len(rhs_shape)))

  if dimension_numbers is None:
    # Default to the identity layout for all three operands.
    iota = tuple(range(ndim))
    return ConvDimensionNumbers(iota, iota, iota)

  if not isinstance(dimension_numbers, (list, tuple)):
    msg = "convolution dimension_numbers must be tuple/list or None, got {}."
    raise TypeError(msg.format(type(dimension_numbers)))

  if len(dimension_numbers) != 3:
    msg = "convolution dimension_numbers list/tuple must be length 3, got {}."
    raise TypeError(msg.format(len(dimension_numbers)))
  if any(not isinstance(spec, str) for spec in dimension_numbers):
    msg = "convolution dimension_numbers elements must be strings, got {}."
    raise TypeError(msg.format(tuple(map(type, dimension_numbers))))
  msg = ("convolution dimension_numbers[{}] must have len equal to the ndim "
         "of lhs and rhs, got {} for lhs and rhs shapes {} and {}.")
  for i, spec in enumerate(dimension_numbers):
    if len(spec) != ndim:
      raise TypeError(msg.format(i, len(spec), lhs_shape, rhs_shape))
  lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)
  return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
def conv_general_permutations(dimension_numbers):
  """Utility for convolution dimension permutations relative to Conv HLO.

  Args:
    dimension_numbers: a triple of strings `(lhs_spec, rhs_spec, out_spec)`,
      each naming every dimension of the corresponding operand with a single
      character: 'N'/'C' for lhs and output batch/feature, 'O'/'I' for the
      kernel's out/in feature, plus one character per spatial dimension.

  Returns:
    A triple of integer tuples `(lhs_perm, rhs_perm, out_perm)` permuting
    each operand's dimensions into `(batch-or-out-feature,
    feature-or-in-feature, spatial...)` order, with spatial dimensions
    ordered to follow the kernel (rhs) spec.

  Raises:
    TypeError: if a spec does not contain its batch and feature characters
      exactly once, contains duplicate characters, or the specs disagree on
      the set of spatial characters.
  """
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  lhs_char, rhs_char, out_char = charpairs = ("N", "C"), ("O", "I"), ("N", "C")
  for i, (a, b) in enumerate(charpairs):
    if not dimension_numbers[i].count(a) == dimension_numbers[i].count(b) == 1:
      # Fixed typo in the error message: "exatly" -> "exactly".
      msg = ("convolution dimension_numbers[{}] must contain the characters "
             "'{}' and '{}' exactly once, got {}.")
      raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
    if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
      msg = ("convolution dimension_numbers[{}] cannot have duplicate "
             "characters, got {}.")
      raise TypeError(msg.format(i, dimension_numbers[i]))
  if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
          set(out_spec) - set(out_char)):
    msg = ("convolution dimension_numbers elements must each have the same "
           "set of spatial characters, got {}.")
    raise TypeError(msg.format(dimension_numbers))

  def getperm(spec, charpair):
    # Spatial dims are every position not naming the batch/feature pair; for
    # lhs and out they are reordered to follow the kernel (rhs) spec so all
    # operands agree on the spatial ordering.
    spatial = (i for i, c in enumerate(spec) if c not in charpair)
    if spec is not rhs_spec:
      spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
    return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)

  lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
  return lhs_perm, rhs_perm, out_perm
def _conv_general_proto(dimension_numbers):
  """Translate a `ConvDimensionNumbers` into the XLA client protobuf form.

  Args:
    dimension_numbers: a `ConvDimensionNumbers` namedtuple; positions 0 and 1
      of each spec hold the batch/out-feature and feature/in-feature
      dimensions, the remainder are spatial dimensions.

  Returns:
    An `xla_client.ConvolutionDimensionNumbers` proto with the batch,
    feature, and spatial dimension fields populated from the three specs.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  proto = xla_client.ConvolutionDimensionNumbers()
  proto.input_batch_dimension = lhs_spec[0]
  proto.input_feature_dimension = lhs_spec[1]
  proto.output_batch_dimension = out_spec[0]
  proto.output_feature_dimension = out_spec[1]
  proto.kernel_output_feature_dimension = rhs_spec[0]
  proto.kernel_input_feature_dimension = rhs_spec[1]
  # Everything past the first two entries of each spec is spatial.
  proto.input_spatial_dimensions.extend(lhs_spec[2:])
  proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
  proto.output_spatial_dimensions.extend(out_spec[2:])
  return proto
def _conv_general_vjp_lhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation):
  """Padding for the transposed convolution used in the LHS VJP.

  NOTE(review): the shape arguments appear to be per-spatial-dimension
  sequences of ints (no batch/feature dims) -- confirm against callers.

  Per dimension the total padding is
  `lhs_dilated + rhs_dilated - 1 - out_dilated`, split so that the low side
  gets `rhs_dilated - 1 - original_low_padding`.
  """
  lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
  rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
  # Striding the forward output acts like dilation from the VJP's viewpoint.
  out_dilated_shape = _dilate_shape(out_shape, window_strides)
  pad_before = onp.subtract(rhs_dilated_shape, [lo for lo, _ in padding]) - 1
  pad_after = (onp.add(lhs_dilated_shape, rhs_dilated_shape) - 1
               - out_dilated_shape - pad_before)
  return zip(pad_before, pad_after)
def _conv_general_vjp_rhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation):
  """Padding for the convolution used in the RHS (kernel) VJP.

  Keeps each dimension's original low padding and assigns the remainder of
  the required total (`out_dilated + rhs_dilated - lhs_dilated - 1`) to the
  high side.

  NOTE(review): shape arguments appear to be per-spatial-dimension
  sequences of ints -- confirm against callers.
  """
  lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
  rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
  # Striding the forward output acts like dilation from the VJP's viewpoint.
  out_dilated_shape = _dilate_shape(out_shape, window_strides)
  total_in_pad = out_dilated_shape + rhs_dilated_shape - lhs_dilated_shape - 1
  return [(pad[0], tot - pad[0]) for pad, tot in zip(padding, total_in_pad)]
def _balanced_eq(x, z, y):
  # Elementwise: 1 where only x == z, 1/2 where both x == z and y == z
  # (splitting a tie between x and y), and 0 where x != z.
  return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),
             select(_eq_meet(y, z), _twos(z), _ones(z)))
def _eq_meet(a, b):
  """Elementwise equality `eq(a, b)` after unifying the operands' dtypes.

  NOTE(review): when the dtypes differ, the operand whose dtype equals the
  promoted ("higher") dtype is cast to the *other* operand's dtype, i.e. a
  downcast -- confirm this asymmetry is intentional.
  """
  a_dtype = _dtype(a)
  b_dtype = _dtype(b)
  if a_dtype == b_dtype:
    return eq(a, b)
  if onp.promote_types(a_dtype, b_dtype) == a_dtype:
    a = convert_element_type(a, b_dtype)
  else:
    b = convert_element_type(b, a_dtype)
  return eq(a, b)
def subvals(lst, replace):
  """Return `lst` as a tuple with `(index, value)` substitutions applied."""
  out = list(lst)
  for index, value in replace:
    out[index] = value
  return tuple(out)
def _abstractify(x):
  # used internally for initial-style higher-order primitives
  # Lift x to its shaped abstract value, paired with core.unit (presumably
  # marking "no known constant part" -- confirm against partial-eval usage).
  return pe.PartialVal((raise_to_shaped(core.get_aval(x)), core.unit))
| 40.832877 | 107 | 0.714406 |
f9fe14a12dced97e71f75046848803b2d80cb2cb | 1,616 | py | Python | cipher/caesar.py | oliverchen415/cli_python | 62cf7edc03c0f36a5544e6f5bf4127868c980dc0 | [
"MIT"
] | null | null | null | cipher/caesar.py | oliverchen415/cli_python | 62cf7edc03c0f36a5544e6f5bf4127868c980dc0 | [
"MIT"
] | null | null | null | cipher/caesar.py | oliverchen415/cli_python | 62cf7edc03c0f36a5544e6f5bf4127868c980dc0 | [
"MIT"
] | null | null | null | import click
import string
# Lookup tables used by letter_shift to classify characters.
alpha_list = string.ascii_letters
# Numbers corresponding to the ASCII letters (identity comprehensions over
# range replaced with the idiomatic list(range(...))).
upper_list = list(range(65, 91))   # ord('A')..ord('Z')
lower_list = list(range(97, 123))  # ord('a')..ord('z')
def letter_shift(letter, shift):
    """Shift a single character by `shift` positions within its own case.

    Characters that are not ASCII letters are returned unchanged. The shift
    wraps around the alphabet (modulo 26), so negative shifts and shifts
    larger than 26 now work: the original +/-26 correction produced
    non-letter characters for those inputs (e.g. 'a' shifted by -1 gave 'F').

    Args:
        letter (str): Single character to shift.
        shift (int): Number of positions to shift by (any integer).

    Returns:
        str: The shifted character, with case preserved.
    """
    if letter not in string.ascii_letters:
        return letter
    # Wrap within the letter's own alphabet: 'A'..'Z' or 'a'..'z'.
    base = ord('A') if letter.isupper() else ord('a')
    return chr(base + (ord(letter) - base + shift) % 26)
@click.command()
@click.option('--sentence', prompt='Something to encrypt', help='Message to encrypt')
@click.option('--shift', '-s', prompt='Shift letters by how much?', type=int, help='Number of characters to shift', show_default=True)
def caesar(sentence, shift):
    """Encrypts a message using a Caesar cipher
    Args:
        sentence (String): A message to be encrypted
        shift (Int): A value to shift all letters by
    """
    # Shift each character individually and emit the assembled ciphertext.
    # (Docstring is left untouched: click renders it as the --help text.)
    encrypted = ''.join(letter_shift(character, shift) for character in sentence)
    click.echo(encrypted)
    # alternative method using translations
    # shift_alpha = alpha_list[shift:] + alpha_list[:shift]
    # new_trans = str.maketrans(alpha_list, shift_alpha)
    # click.echo(sentence.translate(new_trans))
if __name__ == '__main__':
caesar() | 32.32 | 134 | 0.685644 |
a04654675bd87cc9e171fc0dc24641841deb6b8a | 1,449 | py | Python | crawler/worker.py | mmendez3800/web-crawler-scraper | ee74d631b3bf707024f798369cfbabdfbc0f89dd | [
"MIT"
] | null | null | null | crawler/worker.py | mmendez3800/web-crawler-scraper | ee74d631b3bf707024f798369cfbabdfbc0f89dd | [
"MIT"
] | null | null | null | crawler/worker.py | mmendez3800/web-crawler-scraper | ee74d631b3bf707024f798369cfbabdfbc0f89dd | [
"MIT"
] | null | null | null | from threading import Thread
from inspect import getsource
from utils.download import download
from utils import get_logger
import scraper
import time
class Worker(Thread):
    """Daemon crawler thread.

    Repeatedly pulls URLs from the shared frontier, downloads each one
    through the provided cache-aware downloader, scrapes the response for
    new URLs, and feeds those back into the frontier until it is empty.
    """
    def __init__(self, worker_id, config, frontier):
        """Store references to the shared config and frontier.

        Raises AssertionError if scraper.py's source contains a direct
        `requests` import (downloads must go through `download`).
        """
        self.logger = get_logger(f"Worker-{worker_id}", "WORKER")
        self.config = config
        self.frontier = frontier
        # basic check for requests in scraper: both import forms must be
        # absent from the source text (str.find() == -1 for each).
        assert {getsource(scraper).find(req) for req in {"from requests import", "import requests"}} == {-1}, "Do not use requests from scraper.py"
        super().__init__(daemon=True)
    def run(self):
        """Main crawl loop; returns once the frontier has no URLs left."""
        # Helper function to initialize scraper
        scraper.init()
        while True:
            tbd_url = self.frontier.get_tbd_url()
            if not tbd_url:
                # A falsy URL signals an exhausted frontier.
                self.logger.info("Frontier is empty. Stopping Crawler.")
                break
            resp = download(tbd_url, self.config, self.logger)
            self.logger.info(
                f"Downloaded {tbd_url}, status <{resp.status}>, "
                f"using cache {self.config.cache_server}.")
            scraped_urls = scraper.scraper(tbd_url, resp)
            for scraped_url in scraped_urls:
                self.frontier.add_url(scraped_url)
            self.frontier.mark_url_complete(tbd_url)
            # Throttle between requests by the configured delay.
            time.sleep(self.config.time_delay)
        # Helper function to finalize scraper
        scraper.create_report()
| 36.225 | 147 | 0.620428 |
d8a7057b49f456b4dd7a76e68ff9dec92812d2c8 | 536 | py | Python | dataset/Scripts/downimg.py | YASH-DIXIT-24/e-marketplace | 4edc5d90153d573601af57c8e596828157115e87 | [
"Apache-2.0"
] | null | null | null | dataset/Scripts/downimg.py | YASH-DIXIT-24/e-marketplace | 4edc5d90153d573601af57c8e596828157115e87 | [
"Apache-2.0"
] | null | null | null | dataset/Scripts/downimg.py | YASH-DIXIT-24/e-marketplace | 4edc5d90153d573601af57c8e596828157115e87 | [
"Apache-2.0"
] | 4 | 2021-09-20T17:21:26.000Z | 2021-11-26T16:11:16.000Z | import os
import subprocess

import pandas as pd

# Download every image URL referenced by the CSV files in the current
# directory into images/<row-index>.png, then record the local path back
# into a 'path' column of each CSV.
files = os.listdir('./')
# exist_ok replaces the bare try/except around os.mkdir, which silently
# swallowed *every* error (permissions, disk full, ...), not just "exists".
os.makedirs('images', exist_ok=True)
for f in files:
    if f.endswith('.csv'):
        df = pd.read_csv(f)
        df['path'] = pd.NA
        for cnt, (ind, row) in enumerate(df.iterrows()):
            # Progress message on every 10th row. The original tested
            # `if cnt % 10:`, which printed on the nine NON-multiples of 10.
            if cnt % 10 == 0:
                print(f'Done objects {cnt}')
            path = f'images/{ind}.png'
            # Pass curl an argument list (no shell) so URL values read from
            # the CSV cannot inject shell commands.
            subprocess.run(['curl', str(row['img']), '-o', path], check=False)
            # .loc avoids chained assignment (df['path'][ind] = ...), which
            # pandas may apply to a temporary copy and silently drop.
            df.loc[ind, 'path'] = path
        df.to_csv(f)
| 20.615385 | 63 | 0.460821 |
ded4b857841a55349839e1a9fdf7e09b204e3861 | 3,718 | py | Python | src/fuzzingtool/decorators/plugin_meta.py | retr0-13/FuzzingTool | 2fe34911abd583838b7859f83201cd474a2beefc | [
"MIT"
] | null | null | null | src/fuzzingtool/decorators/plugin_meta.py | retr0-13/FuzzingTool | 2fe34911abd583838b7859f83201cd474a2beefc | [
"MIT"
] | null | null | null | src/fuzzingtool/decorators/plugin_meta.py | retr0-13/FuzzingTool | 2fe34911abd583838b7859f83201cd474a2beefc | [
"MIT"
] | null | null | null | # Copyright (c) 2020 - present Vitor Oriel <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ..core.bases.base_plugin import Plugin
from ..exceptions.main_exceptions import MetadataException
def plugin_meta(cls: Plugin) -> Plugin:
    """Class decorator validating the metadata declared on a plugin

    @type cls: Plugin
    @param cls: The class that call this decorator
    """
    _check_mandatory_meta(cls)
    plugin_name = cls.__name__
    if not cls.__author__:
        raise MetadataException(f"Author cannot be empty on plugin {plugin_name}")
    if cls.__params__:
        _check_params_meta(cls)
    if not cls.__desc__:
        raise MetadataException(f"Description cannot be blank on plugin {plugin_name}")
    if not cls.__version__:
        raise MetadataException(f"Version cannot be blank on plugin {plugin_name}")
    return cls
def _check_mandatory_meta(cls: Plugin) -> None:
    """Ensure every required metadata attribute is declared on the plugin.

    Uses vars(cls) on purpose, so attributes must be declared on the class
    itself (inherited attributes do not count).

    @type cls: Plugin
    @param cls: The class with the plugin metadata
    """
    required = ('__author__', '__params__',
                '__desc__', '__type__', '__version__')
    declared = vars(cls)
    for attribute in required:
        if attribute not in declared:
            raise MetadataException(
                f"Metadata {attribute} not specified on plugin {cls.__name__}"
            )
def _check_params_meta(cls: Plugin) -> None:
    """Validate the parameter metadata declared on the plugin.

    Ensures ``__params__`` is a dict carrying non-empty 'metavar' and 'type'
    entries, and that list-typed parameters also declare a non-empty
    'cli_list_separator'.

    @type cls: Plugin
    @param cls: The class with the plugin metadata
    """
    # isinstance (rather than `type(...) is dict`) so dict subclasses such
    # as OrderedDict are accepted too.
    if not isinstance(cls.__params__, dict):
        raise MetadataException("The parameters must be a "
                                f"dictionary on plugin {cls.__name__}")
    param_dict_keys = cls.__params__.keys()
    for key in ['metavar', 'type']:
        if key not in param_dict_keys:
            raise MetadataException(f"Key {key} must be in parameters "
                                    f"dict on plugin {cls.__name__}")
        if not cls.__params__[key]:
            raise MetadataException(f"Value of {key} cannot be empty in "
                                    f"parameters dict on plugin {cls.__name__}")
    if cls.__params__['type'] is list:
        if 'cli_list_separator' not in param_dict_keys:
            raise MetadataException("The key 'cli_list_separator' must be present "
                                    "when parameter type is list "
                                    f"on plugin {cls.__name__}")
        if not cls.__params__['cli_list_separator']:
            raise MetadataException("Value of 'cli_list_separator' "
                                    f"cannot be blank on {cls.__name__}")
| 43.232558 | 84 | 0.665949 |
47b6d133a1e895c9a353b21ed5a1a9dd45bf49f5 | 30,887 | py | Python | demo/migrations/0003_auto__del_field_standardpage_postcode.py | marceloboth/wagtail-cms | 610d74f18782fa05983952051c795c643db54cf9 | [
"BSD-3-Clause"
] | 1 | 2015-08-06T15:00:59.000Z | 2015-08-06T15:00:59.000Z | demo/migrations/0003_auto__del_field_standardpage_postcode.py | marceloboth/wagtail-cms | 610d74f18782fa05983952051c795c643db54cf9 | [
"BSD-3-Clause"
] | null | null | null | demo/migrations/0003_auto__del_field_standardpage_postcode.py | marceloboth/wagtail-cms | 610d74f18782fa05983952051c795c643db54cf9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop the 'postcode' column from demo_standardpage."""
        # Deleting field 'StandardPage.postcode'
        db.delete_column(u'demo_standardpage', 'postcode')
    def backwards(self, orm):
        """Reverse the migration: restore the nullable 'postcode' CharField(255)."""
        # Adding field 'StandardPage.postcode'
        db.add_column(u'demo_standardpage', 'postcode',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'demo.advert': {
'Meta': {'object_name': 'Advert'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'adverts'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'demo.advertplacement': {
'Meta': {'object_name': 'AdvertPlacement'},
'advert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['demo.Advert']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'advert_placements'", 'to': u"orm['wagtailcore.Page']"})
},
u'demo.blogindexpage': {
'Meta': {'object_name': 'BlogIndexPage', '_ormbases': [u'wagtailcore.Page']},
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'demo.blogindexpagerelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'BlogIndexPageRelatedLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['demo.BlogIndexPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'demo.blogpage': {
'Meta': {'object_name': 'BlogPage', '_ormbases': [u'wagtailcore.Page']},
'body': ('wagtail.wagtailcore.fields.RichTextField', [], {}),
'date': ('django.db.models.fields.DateField', [], {}),
'feed_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'demo.blogpagecarouselitem': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'BlogPageCarouselItem'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'carousel_items'", 'to': u"orm['demo.BlogPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'demo.blogpagerelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'BlogPageRelatedLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['demo.BlogPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'demo.blogpagetag': {
'Meta': {'object_name': 'BlogPageTag'},
'content_object': ('modelcluster.fields.ParentalKey', [], {'related_name': "'tagged_items'", 'to': u"orm['demo.BlogPage']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'demo_blogpagetag_items'", 'to': u"orm['taggit.Tag']"})
},
u'demo.contactpage': {
'Meta': {'object_name': 'ContactPage', '_ormbases': [u'wagtailcore.Page']},
'address_1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'body': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'feed_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'}),
'post_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
},
u'demo.eventindexpage': {
'Meta': {'object_name': 'EventIndexPage', '_ormbases': [u'wagtailcore.Page']},
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'demo.eventindexpagerelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'EventIndexPageRelatedLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['demo.EventIndexPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'demo.eventpage': {
'Meta': {'object_name': 'EventPage', '_ormbases': [u'wagtailcore.Page']},
'audience': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'body': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'cost': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'date_from': ('django.db.models.fields.DateField', [], {}),
'date_to': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'feed_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'}),
'signup_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'time_from': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'time_to': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'})
},
u'demo.eventpagecarouselitem': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'EventPageCarouselItem'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'carousel_items'", 'to': u"orm['demo.EventPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'demo.eventpagerelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'EventPageRelatedLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['demo.EventPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'demo.eventpagespeaker': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'EventPageSpeaker'},
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'speakers'", 'to': u"orm['demo.EventPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'demo.homepage': {
'Meta': {'object_name': 'HomePage', '_ormbases': [u'wagtailcore.Page']},
'body': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'demo.homepagecarouselitem': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'HomePageCarouselItem'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'carousel_items'", 'to': u"orm['demo.HomePage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'demo.homepagerelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'HomePageRelatedLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['demo.HomePage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'demo.personpage': {
'Meta': {'object_name': 'PersonPage', '_ormbases': [u'wagtailcore.Page']},
'address_1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'biography': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'feed_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'}),
'post_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
},
u'demo.personpagerelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'PersonPageRelatedLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['demo.PersonPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'demo.standardindexpage': {
'Meta': {'object_name': 'StandardIndexPage', '_ormbases': [u'wagtailcore.Page']},
'feed_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'demo.standardindexpagerelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'StandardIndexPageRelatedLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['demo.StandardIndexPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'demo.standardpage': {
'Meta': {'object_name': 'StandardPage', '_ormbases': [u'wagtailcore.Page']},
'body': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'feed_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'demo.standardpagecarouselitem': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'StandardPageCarouselItem'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['wagtailimages.Image']"}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'carousel_items'", 'to': u"orm['demo.StandardPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'demo.standardpagerelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'StandardPageRelatedLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_external': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['demo.StandardPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'wagtailcore.page': {
'Meta': {'object_name': 'Page'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': u"orm['contenttypes.ContentType']"}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'has_unpublished_changes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_pages'", 'null': 'True', 'to': u"orm['auth.User']"}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'search_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'seo_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'show_in_menus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'wagtaildocs.document': {
'Meta': {'object_name': 'Document'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'wagtailimages.image': {
'Meta': {'object_name': 'Image'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'height': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['demo'] | 90.844118 | 204 | 0.566258 |
0b7b8b4a254da93b49ee7a615d30567522d1749f | 3,725 | py | Python | vqa_experiments/clevr/preprocess_clevr.py | Bidur-Khanal/REMIND | 4eeb6bce7a27d814c94948e2790efedacd014af1 | [
"MIT"
] | 67 | 2020-06-29T14:30:40.000Z | 2022-02-24T06:14:50.000Z | vqa_experiments/clevr/preprocess_clevr.py | msrocean/REMIND | 2e82ca75a3e4d4ccba00c5a763097cc0f650a0a4 | [
"MIT"
] | 5 | 2020-08-14T17:01:39.000Z | 2021-09-12T10:41:25.000Z | vqa_experiments/clevr/preprocess_clevr.py | msrocean/REMIND | 2e82ca75a3e4d4ccba00c5a763097cc0f650a0a4 | [
"MIT"
] | 19 | 2020-07-04T14:59:26.000Z | 2022-02-15T11:24:52.000Z | """
Written by Kushal, modified by Robik
"""
import json
import h5py
import numpy as np
from collections import Counter, defaultdict
from tqdm import tqdm
# Root of the CLEVR dataset on disk; all inputs and outputs live under here.
PATH = '/hdd/robik/CLEVR'
# Coarse question categories mapped to the fine-grained CLEVR program
# functions that belong to each category.
CATEGORIES = {'count': ['count'],
              'compare_attribute':
                  ['equal_color',
                   'equal_material',
                   'equal_shape',
                   'equal_size'],
              'exist': ['exist'],
              'compare_integer': [
                  'greater_than',
                  'less_than',
                  'equal_integer'],
              'query_attribute': [
                  'query_color',
                  'query_material',
                  'query_shape',
                  'query_size']
              }
# Invert CATEGORIES: fine-grained program function -> coarse category name.
categories_fine = dict()
for k, v in CATEGORIES.items():
    for vi in v:
        categories_fine[vi] = k
# Load the raw CLEVR question annotations for both splits.
annotations = dict()
for split in ['train', 'val']:
    with open(f'{PATH}/questions/CLEVR_{split}_questions.json') as fp:
        annotations[split] = json.load(fp)['questions']
# Collect answers / answer types / question types from the training split
# only, so train and val share a single index space.
meta = defaultdict(list)
for ann in annotations['train']:
    ans = ann['answer']
    meta['a'].append(ans)
    meta['atype'].append('answer_type')
    meta['qtype'].append(categories_fine[ann['program'][-1]['function']])
# Build look-up tables (value -> integer index), ordered by frequency so
# that index 0 is always the most common value.
lut = dict()
for m in ['a', 'atype', 'qtype']:
    most_common = Counter(meta[m]).most_common()
    lut[f'{m}2idx'] = {a[0]: idx for idx, a in enumerate(most_common)}
# BUG FIX: the original referenced an undefined name `LUT_tdiuc` here and
# crashed with a NameError before writing anything; write the LUT under
# the dataset root instead (and close the handle via a context manager).
with open(f'{PATH}/LUT_clevr.json', 'w') as fp:
    json.dump(lut, fp)
# %%
# h5py variable-length string dtype, used for all text-valued datasets.
dt = h5py.special_dtype(vlen=str)
for split in ['train', 'val']:
    # Pre-computed question features (produced by an earlier pipeline step).
    qfeat_file = h5py.File(f'{PATH}/questions_{split}_clevr.h5', 'r')
    mem_feat = dict()
    for dset in qfeat_file.keys():
        mem_feat[dset] = qfeat_file[dset][:]  # pull everything into memory
    qids = mem_feat['qids'][:]
    qid2idx = {qid: idx for idx, qid in enumerate(qids)}
    num_instances = len(annotations[split])
    # Output file: one row per question, aligned across all datasets.
    h5file = h5py.File(f'{PATH}/{split}_clevr.h5', 'w')
    h5file.create_dataset('qfeat', (num_instances, 2048), dtype=np.float32)
    h5file.create_dataset('qid', (num_instances,), dtype=np.int64)
    h5file.create_dataset('iid', (num_instances,), dtype=np.int64)
    h5file.create_dataset('q', (num_instances,), dtype=dt)
    h5file.create_dataset('a', (num_instances,), dtype=dt)
    h5file.create_dataset('ten_ans', (num_instances, 10), dtype=dt)
    h5file.create_dataset('aidx', (num_instances,), dtype=np.int32)
    h5file.create_dataset('ten_aidx', (num_instances, 10), dtype=np.int32)
    h5file.create_dataset('atypeidx', (num_instances,), dtype=np.int32)
    h5file.create_dataset('qtypeidx', (num_instances,), dtype=np.int32)
    for idx, ann in enumerate(tqdm(annotations[split])):
        qid = ann['question_index']
        # Image ids are prefixed per split (1 = train, 2 = val) so the two
        # splits cannot collide in a single id space.
        if split == 'train':
            iid = int('1' + str(ann['image_index']))
        elif split == 'val':
            iid = int('2' + str(ann['image_index']))
        else:
            raise ValueError(f'unknown split: {split}')
        feat_idx = qid2idx[qid]
        # CLEVR has a single ground-truth answer; replicate it 10 times to
        # match the VQA-style "ten answers" layout expected downstream.
        ten_ans = [ann['answer']] * 10
        ans = ten_ans[0]
        aidx = lut['a2idx'].get(ans, -1)
        ten_aidx = np.array([lut['a2idx'].get(a, -1) for a in ten_ans])
        atypeidx = lut['atype2idx'].get('answer_type', -1)
        qtype_clevr = categories_fine[ann['program'][-1]['function']]
        qtypeidx = lut['qtype2idx'].get(qtype_clevr, -1)
        h5file['qfeat'][idx] = mem_feat['feats'][feat_idx]
        h5file['qid'][idx] = qid
        h5file['iid'][idx] = iid
        h5file['q'][idx] = mem_feat['questions'][feat_idx]
        h5file['a'][idx] = ans
        h5file['ten_ans'][idx] = ten_ans
        h5file['aidx'][idx] = aidx
        h5file['atypeidx'][idx] = atypeidx
        h5file['qtypeidx'][idx] = qtypeidx
        h5file['ten_aidx'][idx] = ten_aidx
    h5file.close()
    qfeat_file.close()  # BUG FIX: the read handle was never closed
| 33.863636 | 76 | 0.584161 |
e7106d1333953c06c32c23934144508cf5e74794 | 470 | py | Python | torch_glow/tests/nodes/contiguous_test.py | 842974287/glow | 7d77eb9a1c00dbba77321f62ad9c9078beb2b725 | [
"Apache-2.0"
] | null | null | null | torch_glow/tests/nodes/contiguous_test.py | 842974287/glow | 7d77eb9a1c00dbba77321f62ad9c9078beb2b725 | [
"Apache-2.0"
] | null | null | null | torch_glow/tests/nodes/contiguous_test.py | 842974287/glow | 7d77eb9a1c00dbba77321f62ad9c9078beb2b725 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests.utils import jitVsGlow
class TestContiguous(unittest.TestCase):
    # jitVsGlow (project helper from tests.utils) presumably runs the traced
    # function on both eager PyTorch and the Glow backend, compares outputs,
    # and verifies the listed ops were fused — confirm in tests/utils.
    def test_contiguous_basic(self):
        """Test of the PyTorch contiguous Node on Glow."""
        # Single-op wrapper so the traced graph contains just contiguous().
        def contiguous_basic(a):
            return a.contiguous()
        x = torch.randn(2, 2, 2)
        jitVsGlow(contiguous_basic, x, expected_fused_ops={"aten::contiguous"})
| 24.736842 | 82 | 0.712766 |
3e4833d0bc3b22b9e6037f0057c7d8cecde831a7 | 8,862 | py | Python | test/functional/wallet_abandonconflict.py | VaderCoinProject/vadercoin | b513c794b014d40e5aad281dd1f54845c46d216c | [
"MIT"
] | null | null | null | test/functional/wallet_abandonconflict.py | VaderCoinProject/vadercoin | b513c794b014d40e5aad281dd1f54845c46d216c | [
"MIT"
] | null | null | null | test/functional/wallet_abandonconflict.py | VaderCoinProject/vadercoin | b513c794b014d40e5aad281dd1f54845c46d216c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already abandoned.
"""
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import VadercoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class AbandonConflictTest(VadercoinTestFramework):
    """Functional test for the abandontransaction RPC.

    Builds a chain of dependent wallet transactions (txA/txB -> AB1 -> ABC2
    -> child), evicts them from the mempool by restarting with a higher
    -minrelaytxfee, abandons them, re-broadcasts them, and finally conflicts
    them with a mined double spend — asserting node0's wallet balance at
    every step.
    """
    def set_test_params(self):
        # Node 0 accepts very cheap transactions; node 1 (default fee
        # settings) does the mining.
        self.num_nodes = 2
        self.extra_args = [["-minrelaytxfee=0.00001"], []]
    def skip_test_if_missing_module(self):
        # Every step below drives wallet RPCs.
        self.skip_if_no_wallet()
    def run_test(self):
        # Advance the chain so coinbase outputs mature and funds are spendable.
        self.nodes[1].generate(COINBASE_MATURITY)
        self.sync_blocks()
        balance = self.nodes[0].getbalance()
        # Three self-sends from node0, each creating a 10-coin output.
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        self.sync_mempools()
        self.nodes[1].generate(1)
        # Can not abandon non-wallet transaction
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
        # Can not abandon confirmed transaction
        assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))
        self.sync_blocks()
        newbalance = self.nodes[0].getbalance()
        assert balance - newbalance < Decimal("0.001") #no more than fees lost
        balance = newbalance
        # Disconnect nodes so node0's transactions don't get into node1's mempool
        self.disconnect_nodes(0, 1)
        # Identify the 10btc outputs
        nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txA)["details"] if tx_out["amount"] == Decimal("10"))
        nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txB)["details"] if tx_out["amount"] == Decimal("10"))
        nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txC)["details"] if tx_out["amount"] == Decimal("10"))
        inputs = []
        # spend 10btc outputs from txA and txB
        inputs.append({"txid": txA, "vout": nA})
        inputs.append({"txid": txB, "vout": nB})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
        outputs[self.nodes[1].getnewaddress()] = Decimal("5")
        signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
        # Identify the 14.99998btc output
        nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txAB1)["details"] if tx_out["amount"] == Decimal("14.99998"))
        #Create a child tx spending AB1 and C
        inputs = []
        inputs.append({"txid": txAB1, "vout": nAB})
        inputs.append({"txid": txC, "vout": nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
        signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
        # Create a child tx spending ABC2
        signed3_change = Decimal("24.999")
        inputs = [{"txid": txABC2, "vout": 0}]
        outputs = {self.nodes[0].getnewaddress(): signed3_change}
        signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        # note tx is never directly referenced, only abandoned as a child of the above
        self.nodes[0].sendrawtransaction(signed3["hex"])
        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("30") + signed3_change)
        balance = newbalance
        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        self.restart_node(0, extra_args=["-minrelaytxfee=0.0001"])
        assert self.nodes[0].getmempoolinfo()['loaded']
        # Verify txs no longer in either node's mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        # Not in mempool txs from self should only reduce balance
        # inputs are still spent, but change not received
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - signed3_change)
        # Unconfirmed received funds that are not in mempool, also shouldn't show
        # up in unconfirmed balance
        balances = self.nodes[0].getbalances()['mine']
        assert_equal(balances['untrusted_pending'] + balances['trusted'], newbalance)
        # Also shouldn't show up in listunspent
        assert not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]
        balance = newbalance
        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance
        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
        self.restart_node(0, extra_args=["-minrelaytxfee=0.00001"])
        assert self.nodes[0].getmempoolinfo()['loaded']
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)
        # But if it is received again then it is unabandoned
        # And since now in mempool, the change is available
        # But its child tx remains abandoned
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance
        # Send child tx again so it is unabandoned
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance
        # Remove using high relay fee again
        self.restart_node(0, extra_args=["-minrelaytxfee=0.0001"])
        assert self.nodes[0].getmempoolinfo()['loaded']
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance
        # Create a double spend of AB1 by spending again from only A's 10 output
        # Mine double spend from node 1
        inputs = []
        inputs.append({"txid": txA, "vout": nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransactionwithwallet(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.nodes[1].generate(1)
        self.connect_nodes(0, 1)
        self.sync_blocks()
        # Verify that B and C's 10 VADE outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance
        # There is currently a minor bug around this and so this test doesn't work. See Issue #7315
        # Invalidate the block with the double spend and B's 10 VADE output should no longer be available
        # Don't think C's should either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
        self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
        self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| 48.163043 | 138 | 0.669375 |
9119102659446cbbdfcad1e01f295e4880b5cc44 | 3,565 | py | Python | piltdown/literals.py | EvansWinner/piltdown | 94492784ecd7bb7a18f6f171a013bc8701603e8b | [
"BSD-2-Clause"
] | null | null | null | piltdown/literals.py | EvansWinner/piltdown | 94492784ecd7bb7a18f6f171a013bc8701603e8b | [
"BSD-2-Clause"
] | null | null | null | piltdown/literals.py | EvansWinner/piltdown | 94492784ecd7bb7a18f6f171a013bc8701603e8b | [
"BSD-2-Clause"
] | null | null | null | """Global and shared variables for or potentially for any chart type."""
from typing import Dict, List
# NOTE(review): this "blank" appears to be a full-width space (it is the
# blank used by the full-width table below) — confirm the exact codepoint.
EMPTY_BLOCK: str = " "
LATIN_CHARS: str = "abcdefghijklmnopqrstuvwxyz"
GREEK_CHARS: str = "αβγδεζηθικλμνξοπρστυφχψω"
DIGITS = "0123456789"
# When not supplied with a list of labels, we use these
DEFAULT_CHARS: str = (
    LATIN_CHARS.upper() + GREEK_CHARS.upper() + LATIN_CHARS + GREEK_CHARS
)
DEFAULT_LABELS: List[str] = list(DEFAULT_CHARS)
FULLWIDTH_KEYS: str = (
    " "
    + DIGITS
    + LATIN_CHARS.upper()
    + LATIN_CHARS
    + ",.:;!?\"'`^~_&@#%+-*=<>()[]{}|/\\$"
)
FULLWIDTH_CHARS: str = (
    EMPTY_BLOCK
    + "0123456789"
    + "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    + "abcdefghijklmnopqrstuvwxyz"
    + ",.:;!?"'`^~_&@#%+-*=<>()[]{}¦/\$"
)
# Positional mapping: ASCII character -> its full-width counterpart.
FULLWIDTH: Dict[str, str] = dict(zip(FULLWIDTH_KEYS, FULLWIDTH_CHARS))
MONOSPACE_CHARS: str = (
    "\n"
    + "𝙰𝙱𝙲𝙳𝙴𝙵𝙶𝙷𝙸𝙹𝙺𝙻𝙼𝙽𝙾𝙿𝚀𝚁𝚂𝚃𝚄𝚅𝚆𝚇𝚈𝚉"
    + "𝚊𝚋𝚌𝚍𝚎𝚏𝚐𝚑𝚒𝚓𝚔𝚕𝚖𝚗𝚘𝚙𝚚𝚛𝚜𝚝𝚞𝚟𝚠𝚡𝚢𝚣"
    + "𝟶𝟷𝟸𝟹𝟺𝟻𝟼𝟽𝟾𝟿"
)
MONOSPACE_KEYS: str = (
    "\n"
    + LATIN_CHARS.upper()
    + LATIN_CHARS
    + DIGITS
)
# ASCII letter/digit (plus newline) -> mathematical monospace glyph.
MONOSPACE: Dict[str, str] = dict(zip(MONOSPACE_KEYS, MONOSPACE_CHARS))
BOLD_KEYS: str = LATIN_CHARS.upper() + LATIN_CHARS + GREEK_CHARS.upper() + GREEK_CHARS + DIGITS
# NOTE(review): BOLD_CHARS looks longer than BOLD_KEYS (trailing "𝟬");
# zip() silently drops any surplus characters — confirm this is intended.
BOLD_CHARS: str = (
    "𝐀𝐁𝐂𝐃𝐄𝐅𝐆𝐇𝐈𝐉𝐊𝐋𝐌𝐍𝐎𝐏𝐐𝐑𝐒𝐓𝐔𝐕𝐖𝐗𝐘𝐙"
    + "𝐚𝐛𝐜𝐝𝐞𝐟𝐠𝐡𝐢𝐣𝐤𝐥𝐦𝐧𝐨𝐩𝐪𝐫𝐬𝐭𝐮𝐯𝐰𝐱𝐲𝐳"
    + "𝚨𝚩𝚪𝚫𝚬𝚭𝚮𝚯𝚰𝚱𝚲𝚳𝚴𝚵𝚶𝚷𝚸𝚺𝚻𝚼𝚽𝚾𝚿𝛀"
    + "𝛂𝛃𝛄𝛅𝛆𝛇𝛈𝛉𝛊𝛋𝛌𝛍𝛎𝛏𝛐𝛑𝛒𝛔𝛕𝛖𝛗𝛘𝛙𝛚"
    + "𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗𝟬"
)
BOLD: Dict[str, str] = dict(zip(BOLD_KEYS, BOLD_CHARS))
# Win/Loss Sparklines
LOSS_CHAR: str = "▄"
WIN_CHAR: str = "▀"
ZERO_CHAR: str = "-"
# Horizontal bar charts
EIGHT_EIGHTHS: str = "█"
ZERO_EIGHTHS: str = "⠀" # Not a regular space char
# Left-block characters keyed by the number of eighths of a cell filled.
EIGHTHS: Dict[int, str] = {
    7: "▉",
    6: "▊",
    5: "▋",
    4: "▌",
    3: "▍",
    2: "▎",
    1: "▏",
    0: "",
}
# Waffle charts
WAFFLES = "▩▥▦▤▧▨▣"
# Column sparklines
# Lower-block characters keyed by the number of eighths of a cell filled.
V8THS = {
    0: EMPTY_BLOCK,
    1: "▁",
    2: "▂",
    3: "▃",
    4: "▄",
    5: "▅",
    6: "▆",
    7: "▇",
    8: "█",
}
# Xs and checks
X = "✗"
CHECK = "✓"
# Tally charts
TALLY_ONE: str = "𝍩"
TALLY_TWO: str = "𝍪"
TALLY_THREE: str = "𝍫"
TALLY_FOUR: str = "𝍬"
TALLY_FIVE: str = "ᚎ"
# Remainder tallies for counts 0-4; TALLY_FIVE presumably marks completed
# groups of five — confirm at call sites.
ONEZIE_TWOZIES: Dict[int, str] = {
    0: "",
    1: TALLY_ONE,
    2: TALLY_TWO,
    3: TALLY_THREE,
    4: TALLY_FOUR,
}
# Dot charts
HDOT_ONE: str = "⚫"
DOT_ONE: str = "*"
# Scaled up numbers
# Each glyph is 5 rows of 3 block/quadrant characters (hence "3x5").
DEFAULT3X5FONT: Dict[str, List[str]] = {
    "%": [
        "█░▞",
        "░▐░",
        "░▞░",
        "░▌░",
        "▞░█",
    ],
    "0": [
        "░█▙",
        "█░█",
        "█░█",
        "█░█",
        "▜█░",
    ],
    "1": [
        "░█░",
        "▞█░",
        "░█░",
        "░█░",
        "▗█▖",
    ],
    "2": [
        "▄▆▖",
        "▘░▛",
        "░▞░",
        "▐░░",
        "█▄▟",
    ],
    "3": [
        "▟▀▙",
        "░░█",
        "░█░",
        "░░█",
        "▜▄▛",
    ],
    "4": [
        "░░▟",
        "░▞█",
        "▟▄█",
        "░░█",
        "░░█",
    ],
    "5": [
        "▛▀▜",
        "▌░░",
        "█▀▙",
        "░░▐",
        "█▄▛",
    ],
    "6": [
        "▄▀▙",
        "█░░",
        "██▙",
        "█░█",
        "▜█▛",
    ],
    "7": [
        "▛▀█",
        "░░▛",
        "░▞░",
        "▐░░",
        "█░░",
    ],
    "8": [
        "▟█▙",
        "█░█",
        "███",
        "█░█",
        "▜█▛",
    ],
    "9": [
        "▟█▙",
        "█░█",
        "▜██",
        "░░█",
        "▗█▘",
    ],
    ",": [
        "░░░",
        "░░░",
        "░░░",
        "░░░",
        "░▜░",
    ],
    ".": [
        "░░░",
        "░░░",
        "░░░",
        "░░░",
        "░▖░",
    ],
}
| 16.353211 | 95 | 0.396073 |
712115337a25aaa510429fb1538a6531ae843623 | 1,564 | py | Python | binance/api/model.py | cottonmalone/binance-dex | 2b33e5d0dd0e24e78eea58e857d9872e5adc8c5f | [
"MIT"
] | null | null | null | binance/api/model.py | cottonmalone/binance-dex | 2b33e5d0dd0e24e78eea58e857d9872e5adc8c5f | [
"MIT"
] | null | null | null | binance/api/model.py | cottonmalone/binance-dex | 2b33e5d0dd0e24e78eea58e857d9872e5adc8c5f | [
"MIT"
] | null | null | null | from collections import namedtuple
from kim import Mapper, field
class BaseModel(object):
    """Common base for mapped model classes.

    Supports keyword-based construction and provides value-style equality
    plus a namedtuple-like repr for debugging.
    """
    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __repr__(self):
        # Render as if this were a namedtuple of the instance's attributes.
        shape = namedtuple(type(self).__name__, self.__dict__.keys())
        return str(shape(**self.__dict__))
def map_to_object(mapper_cls):
    """
    Decorator factory that marshals a function's return value through a mapper.

    The wrapped function's return value is passed to ``mapper_cls`` as its
    ``data`` argument, and the mapper's ``marshal()`` result is returned.

    Args:
        mapper_cls (cls): The class of the mapper.

    Returns:
        callable: Decorator whose wrapped function yields the mapped object.
    """
    from functools import wraps

    def wrap(f):
        # IDIOM FIX: preserve the wrapped function's name/docstring, which
        # the original decorator clobbered with "wrapped_f".
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # create mapper with return data
            mapper = mapper_cls(data=f(*args, **kwargs))
            # return marshaled object
            return mapper.marshal()
        return wrapped_f
    return wrap
class Time(BaseModel):
    """
    Class to represent current and block time.

    Carries no behaviour of its own; attributes are assigned through
    ``BaseModel.__init__`` keyword arguments (presumably populated by
    ``TimeMapper`` — confirm with the kim marshalling flow).

    Attributes:
        ap_time (datetime.datetime): Current time.
        block_time (datetime.datetime): Block time.
    """
    pass
class TimeMapper(Mapper):
    """
    Mapper for Time class.

    Declares the kim mapping for :class:`Time`; both fields are parsed as
    datetimes by ``kim.field.DateTime``.
    """
    __type__ = Time
    # Field names mirror the attributes documented on Time.
    ap_time = field.DateTime()
    block_time = field.DateTime()
| 22.028169 | 77 | 0.623402 |
c88c343ad4b8a5a5393c68318e6aee2d7a8932d9 | 987 | py | Python | problems/131.Palindrome_Partitioning/AC_dp_nm.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | problems/131.Palindrome_Partitioning/AC_dp_nm.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | problems/131.Palindrome_Partitioning/AC_dp_nm.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_dp_nm.py
# Create Date: 2015-03-19 09:45:12
# Usage: AC_dp_nm.py
# Description:
class Solution:
    # @param s, a string
    # @return a list of lists of string
    def partition(self, s):
        """Return all ways to split s into consecutive palindromic substrings.

        Results keep the original depth-first order (cuts at earlier
        positions are explored first).

        BUG FIX: the original raised IndexError on the empty string; an
        empty input now yields the single empty partition [[]].
        """
        n = len(s)
        # ends[i] lists every j (exclusive end) such that s[i:j] is a
        # palindrome, in increasing order — precomputed once so the
        # backtracking below only ever walks valid cuts.
        ends = [
            [j + 1 for j in range(i, n) if s[i:j + 1] == s[i:j + 1][::-1]]
            for i in range(n)
        ]
        result = []
        current = []
        def backtrack(start):
            # A complete partition has consumed the whole string.
            if start == n:
                result.append(current[:])
                return
            for stop in ends[start]:
                current.append(s[start:stop])
                backtrack(stop)
                current.pop()
        backtrack(0)
        return result
# debug
s = Solution()
# BUG FIX: parenthesized print works under both Python 2 and Python 3;
# the original bare print statement is a SyntaxError on Python 3.
print(s.partition('aab'))
| 23.5 | 54 | 0.47619 |
973e6c1643d8ab4155e0f66050a5582c56cdc7ac | 1,500 | py | Python | samples/generated_samples/cloudoptimization_v1_generated_fleet_routing_optimize_tours_async.py | changsongd/python-optimization | 9f574507010ef637e5a6912a1cb725b782c03cf4 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/cloudoptimization_v1_generated_fleet_routing_optimize_tours_async.py | changsongd/python-optimization | 9f574507010ef637e5a6912a1cb725b782c03cf4 | [
"Apache-2.0"
] | 4 | 2022-03-24T18:26:49.000Z | 2022-03-31T19:18:00.000Z | samples/generated_samples/cloudoptimization_v1_generated_fleet_routing_optimize_tours_async.py | changsongd/python-optimization | 9f574507010ef637e5a6912a1cb725b782c03cf4 | [
"Apache-2.0"
] | 2 | 2022-03-24T19:22:44.000Z | 2022-03-28T21:19:59.000Z | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for OptimizeTours
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-optimization
# [START cloudoptimization_v1_generated_FleetRouting_OptimizeTours_async]
from google.cloud import optimization_v1
async def sample_optimize_tours():
    """Generated sample: send a minimal OptimizeTours request and print the reply.

    "parent_value" is a placeholder; supply a real parent resource name
    before running.
    """
    # Create a client
    client = optimization_v1.FleetRoutingAsyncClient()
    # Initialize request argument(s)
    request = optimization_v1.OptimizeToursRequest(
        parent="parent_value",
    )
    # Make the request
    response = await client.optimize_tours(request=request)
    # Handle the response
    print(response)
# [END cloudoptimization_v1_generated_FleetRouting_OptimizeTours_async]
| 32.608696 | 85 | 0.766 |
6fe32008ab356ed0f073decf326fb4776e367afa | 5,878 | py | Python | uniq_rse_analysis.py | etri-city-traffic-brain/traffic_demand_tools | 1ac4a691948c8ea073f8145d0472898fc0c4bfaa | [
"Apache-2.0"
] | 6 | 2020-11-23T05:07:51.000Z | 2021-12-27T05:10:16.000Z | uniq_rse_analysis.py | etri-city-traffic-brain/traffic_demand_tools | 1ac4a691948c8ea073f8145d0472898fc0c4bfaa | [
"Apache-2.0"
] | null | null | null | uniq_rse_analysis.py | etri-city-traffic-brain/traffic_demand_tools | 1ac4a691948c8ea073f8145d0472898fc0c4bfaa | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import sys
import os
import openpyxl
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
# Filtering according to target date and target time
# Cureent Target Time: 09:30 to 10:30 (1hour)
# If you need to change the time, you can modify following function.
def checktargetdate(x, targetdates):
    """Return a Series with the 8-char date prefix of x and a 0/1 flag.

    The flag is 1 only when the date is in targetdates AND the remaining
    digits fall inside the 09:30:00-10:30:00 window (inclusive).
    """
    day = x[:8]
    flag = 0
    if day in targetdates:
        # Only parse the time-of-day part when the date already matches.
        clock = int(x[8:])
        if 93000 <= clock <= 103000:
            flag = 1
    return pd.Series(data={'date': day, 'check': flag}, index=['date', 'check'])
def checktargetdate2(x, tdate):
    """Flag (1/0) whether the first eight characters of x equal tdate."""
    return 1 if x[:8] == tdate else 0
def checkarea(x, targetspot):
    """Flag (1/0) rows whose previous or current RSE id lies in targetspot."""
    touches_area = x['BEFORE_RSE_ID'] in targetspot or x['RSE_ID'] in targetspot
    return 1 if touches_area else 0
def checksection(x, br, r):
    """Flag (1/0) whether the row travels exactly from RSE br to RSE r."""
    matched = (x['BEFORE_RSE_ID'], x['RSE_ID']) == (br, r)
    return 1 if matched else 0
def checksection2(x, rsesct):
    """Return the row's 'BEFORE-CURRENT' section label when it is a target
    section, otherwise the sentinel string 'no target'."""
    section = x['BEFORE_RSE_ID'] + '-' + x['RSE_ID']
    return section if section in rsesct else 'no target'
def add_median_lables(ax):
    """Annotate each box of a seaborn boxplot with its median value.

    NOTE(review): the name keeps the original spelling ("lables"); renaming
    would break the call site in main().
    """
    lines = ax.get_lines()
    # Boxes are drawn as PathPatch children of the axes.
    boxes = [child for child in ax.get_children() if type(child).__name__ == 'PathPatch']
    lines_per_box = int(len(lines) / len(boxes))
    # The start offset of 4 assumes the median is the 5th line artist drawn
    # per box -- TODO confirm against the seaborn/matplotlib versions in use.
    for median in lines[4:len(lines):lines_per_box]:
        x, y = (data.mean() for data in median.get_data())
        text = ax.text(x, y, f'{y:.1f}', ha='center', va='center', fontweight='bold', color= 'white')
        # Outline the label in the median line's colour so it stays readable.
        text.set_path_effects([
            path_effects.Stroke(linewidth=3, foreground=median.get_color()),
            path_effects.Normal(),
        ])
def main(targetrsefile, targetdate):
    """Load the RSE Excel sheet, keep one day's 09:30-10:30 error records, and
    box-plot per-section travel times with summary statistics.

    targetrsefile: path to the source .xlsx file (sheet 'RSE').
    targetdate: 8-character date string, e.g. '20200101'.
    """
    targetdates = []
    targetdates.append(targetdate)
    targetspot = ['RSE1507', 'RSE1504', 'RSE8102', 'RSE8507'] # Target RSE Spots in Target Area --> It could be changed and modified to others according to Target Area
    print('target date: {}'.format(targetdates))
    df = pd.read_excel(targetrsefile, sheet_name='RSE', engine='openpyxl')
    df = df.reset_index()
    # Select the Korean source columns, then rename them to English below.
    df = df[['등록_시각', 'OBE_ID', '전 RSE ID', '현재 RSE ID', '전 수집 일자', '현재 수집 일자', '전-현재 통과시간(초)', '전-현재 처리에러코드']]
    df.columns = ['REG_YMDHMS', 'OBE_ID', 'BEFORE_RSE_ID', 'RSE_ID', 'BEFORE_COLCT_YMDHMS', 'NOW_COLCT_YMDHMS', 'BEFORE_NOW_TRVL_TM', 'BEFORE_NOW_PROCESS_ERROR_CD']
    # Keep only rows collected on a target date inside the 09:30-10:30 window.
    df[['date', 'check']] = df['NOW_COLCT_YMDHMS'].apply(lambda x: checktargetdate(str(x),targetdates))
    is_target = df['check'] == 1
    df = df[is_target]
    # Keep only rows touching one of the target RSE spots.
    df['check2'] = df[['BEFORE_RSE_ID', 'RSE_ID']].apply(lambda x: checkarea(x,targetspot), axis=1)
    is_target2 = df['check2'] == 1
    df = df[is_target2]
    is_check = df['BEFORE_NOW_PROCESS_ERROR_CD'] == 1305 # the code '1305' means the 'Have an Error'
    df = df[is_check]
    targetsection = ['RSE8507-RSE8102', 'RSE8102-RSE1504', 'RSE1504-RSE1507', 'RSE1507-RSE1504', 'RSE8102-RSE8507']
    print('Target RSE Section: {}'.format(targetsection))
    for tdate in targetdates:
        is_target3 = df['date'] == tdate
        temp = df[is_target3]
        print(temp)
        print('DATE: {}, Total No.: {} (Unique No.: {}) '.format(tdate, temp['OBE_ID'].count(), len(temp['OBE_ID'].unique())))
        # Label each row with its RSE section (or 'no target') and drop the rest.
        temp['rse_section']= temp[['BEFORE_RSE_ID', 'RSE_ID']].apply(lambda x: checksection2(x, targetsection), axis=1)
        is_target_sct = temp['rse_section'] != 'no target'
        temp2 = temp[is_target_sct]
        # One observation per vehicle per section.
        temp2 = temp2.drop_duplicates(['OBE_ID', 'BEFORE_RSE_ID', 'RSE_ID'])
        temp2 = temp2.sort_values(by='rse_section')
        ## boxplot of travel time per section, with mean markers
        figure = plt.figure(figsize=(15, 12))
        sns.set(style='whitegrid')
        bplot = sns.boxplot(x='rse_section', y='BEFORE_NOW_TRVL_TM', data=temp2, showmeans=True,
                            meanprops={'marker': 'o', 'markerfacecolor': 'blue', 'markeredgecolor': 'black',
                                       'markersize': '10'},
                            width=0.5, linewidth=0.75)
        plt.ylim(0, 700)
        add_median_lables(bplot.axes)
        # Annotate each box with its observation count.
        xlabels = [x.get_text() for x in bplot.get_xticklabels()]
        nobs = temp2.groupby('rse_section')['BEFORE_NOW_TRVL_TM'].count()
        for i, l in enumerate(xlabels):
            n = nobs[l]
            bplot.annotate('n={}'.format(n), xy=(i, 0.01), xycoords=('data', 'axes fraction'), ha='center')
        bplot.axes.set_title('Travel Time per RSE Section', fontsize=16)
        bplot.set_xlabel('RSE Sections', fontsize=18)
        bplot.set_ylabel('Travel Time [s]', fontsize=18)
        bplot.tick_params(labelsize=12)
        resultdir = './output'
        try:
            if not os.path.exists(resultdir):
                os.makedirs(resultdir)
        except OSError:
            print('[Error] Cannot create the Result Output Directory {}'.format(resultdir))
        resultfile = os.path.join(resultdir, 'RSE_Analysis_Result_Fig'+ tdate + '.jpg')
        bplot.figure.savefig(resultfile, format='jpeg', dpi=100)
        print('-----------------------------------------------------------------------------------------')
        # Per-section summary: unique vehicles / total rows, then travel-time stats.
        for sc in targetsection:
            is_temp = temp2['rse_section'] == sc
            temp5 = temp2[is_temp]
            print('{} : {} / {}'.format(sc, len(temp5['OBE_ID'].unique()), temp5['OBE_ID'].count()))
            print(
                'sum: {}, mean: {}, median: {}'.format(temp5['BEFORE_NOW_TRVL_TM'].sum(), temp5['BEFORE_NOW_TRVL_TM'].mean(),
                                                       temp5['BEFORE_NOW_TRVL_TM'].median()))
            print('-----------------------------------------------------------------------------------------')
if __name__ == "__main__":
    # NOTE(review): main() has no return statement, so `not main(...)` is
    # always True and the script always exits with status 1 -- confirm intent.
    if not main(sys.argv[1], sys.argv[2]):
        sys.exit(1)
61c26b3fa36ad844f211136b9357a96947412248 | 22,413 | py | Python | tests/python/unittest/test_io.py | ZiyueHuang/incubator-mxnet-1 | 566bbf74c9259d92a2321ccabb68ead048c75372 | [
"Apache-2.0"
] | 13 | 2017-08-11T05:19:48.000Z | 2020-05-12T02:09:27.000Z | tests/python/unittest/test_io.py | ZiyueHuang/incubator-mxnet-1 | 566bbf74c9259d92a2321ccabb68ead048c75372 | [
"Apache-2.0"
] | 4 | 2021-03-30T11:59:59.000Z | 2022-03-12T00:40:23.000Z | tests/python/unittest/test_io.py | ZiyueHuang/incubator-mxnet-1 | 566bbf74c9259d92a2321ccabb68ead048c75372 | [
"Apache-2.0"
] | 13 | 2016-11-10T06:38:46.000Z | 2021-03-18T21:26:11.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import mxnet as mx
import mxnet.ndarray as nd
from mxnet.test_utils import *
from mxnet.base import MXNetError
import numpy as np
import os
import gzip
import pickle
import time
try:
import h5py
except ImportError:
h5py = None
import sys
from common import assertRaises
import pytest
from itertools import zip_longest
def test_MNISTIter(tmpdir):
    """MNISTIter: full-epoch batch count and reset() reproducibility."""
    # prepare data
    path = str(tmpdir)
    get_mnist_ubyte(path)
    batch_size = 100
    train_dataiter = mx.io.MNISTIter(
        image=os.path.join(path, 'train-images-idx3-ubyte'),
        label=os.path.join(path, 'train-labels-idx1-ubyte'),
        data_shape=(784,),
        batch_size=batch_size, shuffle=1, flat=1, silent=0, seed=10)
    # one full pass must yield exactly 60000 / batch_size batches
    nbatch = 60000 / batch_size
    batch_count = 0
    for batch in train_dataiter:
        batch_count += 1
    assert(nbatch == batch_count)
    # with a fixed seed, the first batch after reset() must repeat exactly
    train_dataiter.reset()
    train_dataiter.iter_next()
    label_0 = train_dataiter.getlabel().asnumpy().flatten()
    train_dataiter.iter_next()
    train_dataiter.iter_next()
    train_dataiter.iter_next()
    train_dataiter.iter_next()
    train_dataiter.reset()
    train_dataiter.iter_next()
    label_1 = train_dataiter.getlabel().asnumpy().flatten()
    assert(sum(label_0 - label_1) == 0)
    mx.nd.waitall()
def test_Cifar10Rec(tmpdir):
    """Read the whole CIFAR-10 train RecordIO and check per-class label counts."""
    path = str(tmpdir)
    get_cifar10(path)
    dataiter = mx.io.ImageRecordIter(
        path_imgrec=os.path.join(path, 'cifar', 'train.rec'),
        mean_img=os.path.join(path, 'cifar', 'cifar10_mean.bin'),
        rand_crop=False,
        # NOTE(review): 'and_mirror' looks like a typo for 'rand_mirror' --
        # confirm whether the argument is silently ignored by the backend.
        and_mirror=False,
        shuffle=False,
        data_shape=(3, 28, 28),
        batch_size=100,
        preprocess_threads=4,
        prefetch_buffer=1)
    labelcount = [0 for i in range(10)]
    batchcount = 0
    for batch in dataiter:
        npdata = batch.data[0].asnumpy().flatten().sum()
        sys.stdout.flush()
        batchcount += 1
        nplabel = batch.label[0].asnumpy()
        for i in range(nplabel.shape[0]):
            labelcount[int(nplabel[i])] += 1
    # CIFAR-10's train split holds exactly 5000 images per class.
    for i in range(10):
        assert(labelcount[i] == 5000)
@pytest.mark.parametrize('inter_method', [0,1,2,3,4,9,10])
def test_inter_methods_in_augmenter(inter_method, tmpdir):
    """Smoke-test each supported interpolation method of the image augmenter."""
    path = str(tmpdir)
    get_cifar10(path)
    dataiter = mx.io.ImageRecordIter(
        path_imgrec=os.path.join(path, 'cifar', 'train.rec'),
        mean_img=os.path.join(path, 'cifar', 'cifar10_mean.bin'),
        max_rotate_angle=45,
        data_shape=(3, 28, 28),
        batch_size=100,
        inter_method=inter_method)
    # Merely draining the iterator must not raise for any method.
    for batch in dataiter:
        pass
def test_image_iter_exception(tmpdir):
    """An impossible data_shape (5 channels) must raise MXNetError during iteration."""
    with pytest.raises(MXNetError):
        path = str(tmpdir)
        get_cifar10(path)
        dataiter = mx.io.ImageRecordIter(
            path_imgrec=os.path.join(path, 'cifar', 'train.rec'),
            mean_img=os.path.join(path, 'cifar', 'cifar10_mean.bin'),
            rand_crop=False,
            and_mirror=False,
            shuffle=False,
            data_shape=(5, 28, 28),
            batch_size=100,
            preprocess_threads=4,
            prefetch_buffer=1)
        labelcount = [0 for i in range(10)]
        batchcount = 0
        # the error surfaces while batches are being produced
        for batch in dataiter:
            pass
def _init_NDArrayIter_data(data_type, is_image=False):
    """Build (data, labels) fixtures as mx.nd or numpy arrays.

    is_image=True: 5000 random single-channel 28x28 images with all-ones labels.
    Otherwise: 1000 2x2 samples where sample i and label i both equal i / 100,
    so batches can be cross-checked against their labels.
    data_type selects the container: 'NDArray' -> mx.nd, anything else -> numpy.
    """
    if is_image:
        data = nd.random.uniform(0, 255, shape=(5000, 1, 28, 28))
        labels = nd.ones((5000, 1))
        return data, labels
    if data_type == 'NDArray':
        data = nd.ones((1000, 2, 2))
        labels = nd.ones((1000, 1))
    else:
        data = np.ones((1000, 2, 2))
        labels = np.ones((1000, 1))
    # Row i is filled (broadcast) with i/100 in both data and labels.
    for i in range(1000):
        data[i] = i / 100
        labels[i] = i / 100
    return data, labels
def _test_last_batch_handle(data, labels=None, is_image=False):
    """Check NDArrayIter batch counts/labels for 'pad', 'discard' and 'roll_over'."""
    # Test the three parameters 'pad', 'discard', 'roll_over'
    last_batch_handle_list = ['pad', 'discard', 'roll_over']
    if labels is not None and not is_image and len(labels) != 0:
        # expected counts of label values 0 and 8, per handle mode
        labelcount_list = [(124, 100), (100, 96), (100, 96)]
    if is_image:
        batch_count_list = [40, 39, 39]
    else:
        batch_count_list = [8, 7, 7]
    for idx in range(len(last_batch_handle_list)):
        dataiter = mx.io.NDArrayIter(
            data, labels, 128, False, last_batch_handle=last_batch_handle_list[idx])
        batch_count = 0
        if labels is not None and len(labels) != 0 and not is_image:
            labelcount = [0 for i in range(10)]
        for batch in dataiter:
            if len(data) == 2:
                assert len(batch.data) == 2
            if labels is not None and len(labels) != 0:
                if not is_image:
                    label = batch.label[0].asnumpy().flatten()
                    # check data if it matches corresponding labels
                    assert((batch.data[0].asnumpy()[:, 0, 0] == label).all())
                    for i in range(label.shape[0]):
                        labelcount[int(label[i])] += 1
            else:
                assert not batch.label, 'label is not empty list'
            # keep the last batch of 'pad' to be used later
            # to test first batch of roll_over in second iteration
            batch_count += 1
            if last_batch_handle_list[idx] == 'pad' and \
                batch_count == batch_count_list[0]:
                cache = batch.data[0].asnumpy()
        # check if batchifying functionality work properly
        if labels is not None and len(labels) != 0 and not is_image:
            assert labelcount[0] == labelcount_list[idx][0], last_batch_handle_list[idx]
            assert labelcount[8] == labelcount_list[idx][1], last_batch_handle_list[idx]
        assert batch_count == batch_count_list[idx]
        # after reset(), the first batch must equal the padded batch cached above
        dataiter.reset()
        assert np.array_equal(dataiter.next().data[0].asnumpy(), cache)
def _test_shuffle(data, labels=None):
    """Verify a shuffled NDArrayIter only permutes batches according to its idx."""
    # Collect the unshuffled single-sample batches as the reference order.
    reference = [batch.data[0].asnumpy()
                 for batch in mx.io.NDArrayIter(data, labels, 1, False)]
    shuffled_iter = mx.io.NDArrayIter(data, labels, 1, True)
    permutation = shuffled_iter.idx
    # Each shuffled batch must match the reference batch at its permuted index.
    for pos, batch in enumerate(shuffled_iter):
        assert np.array_equal(batch.data[0].asnumpy(), reference[permutation[pos]])
def _test_corner_case():
    """'pad' handling when batch_size (205) far exceeds the data length (10)."""
    data = np.arange(10)
    data_iter = mx.io.NDArrayIter(data=data, batch_size=205, shuffle=False, last_batch_handle='pad')
    # 205 = 20 full copies of the 10 elements plus the first 5 again.
    expect = np.concatenate((np.tile(data, 20), np.arange(5)))
    assert np.array_equal(data_iter.next().data[0].asnumpy(), expect)
def test_NDArrayIter():
    """Run batch-handle/shuffle checks over every fixture flavour.

    Covers mx.nd vs numpy containers, image vs numeric data, and data passed
    as a single array, a list, or a dict, with labels present, empty or absent.
    """
    dtype_list = ['NDArray', 'ndarray']
    tested_data_type = [False, True]
    for dtype in dtype_list:
        for is_image in tested_data_type:
            data, labels = _init_NDArrayIter_data(dtype, is_image)
            _test_last_batch_handle(data, labels, is_image)
            _test_last_batch_handle([data, data], labels, is_image)
            _test_last_batch_handle(data=[data, data], is_image=is_image)
            _test_last_batch_handle(
                {'data1': data, 'data2': data}, labels, is_image)
            _test_last_batch_handle(data={'data1': data, 'data2': data}, is_image=is_image)
            _test_last_batch_handle(data, [], is_image)
            _test_last_batch_handle(data=data, is_image=is_image)
            _test_shuffle(data, labels)
            _test_shuffle([data, data], labels)
            _test_shuffle([data, data])
            _test_shuffle({'data1': data, 'data2': data}, labels)
            _test_shuffle({'data1': data, 'data2': data})
            _test_shuffle(data, [])
            _test_shuffle(data)
    _test_corner_case()
def test_NDArrayIter_h5py():
    """Run the last-batch-handle checks against h5py-backed datasets."""
    if not h5py:
        return
    data, labels = _init_NDArrayIter_data('ndarray')
    try:
        os.remove('ndarraytest.h5')
    except OSError:
        pass
    # Open explicitly in write mode: h5py >= 3 defaults to read-only, so the
    # previous bare h5py.File(name) call could not create the file.
    with h5py.File('ndarraytest.h5', 'w') as f:
        f.create_dataset('data', data=data)
        f.create_dataset('label', data=labels)
        _test_last_batch_handle(f['data'], f['label'])
        _test_last_batch_handle(f['data'], [])
        _test_last_batch_handle(f['data'])
    # best-effort cleanup of the scratch file
    try:
        os.remove("ndarraytest.h5")
    except OSError:
        pass
def _test_NDArrayIter_csr(csr_iter, csr_iter_empty_list, csr_iter_None, num_rows, batch_size):
    """Check that label-less CSR iterators yield empty labels and exhaust cleanly."""
    seen = 0
    for _, no_label_batch, none_label_batch in zip(csr_iter, csr_iter_empty_list, csr_iter_None):
        assert not no_label_batch.label, 'label is not empty list'
        assert not none_label_batch.label, 'label is not empty list'
        seen += 1
    # 'discard' mode yields exactly one batch per full batch of rows.
    assert seen == num_rows // batch_size
    # All three iterators must now be exhausted.
    for exhausted in (csr_iter, csr_iter_empty_list, csr_iter_None):
        assertRaises(StopIteration, exhausted.next)
def test_NDArrayIter_csr():
    """NDArrayIter behaviour with sparse (CSR) data sources."""
    # creating toy data
    num_rows = rnd.randint(5, 15)
    num_cols = rnd.randint(1, 20)
    batch_size = rnd.randint(1, num_rows)
    shape = (num_rows, num_cols)
    csr, _ = rand_sparse_ndarray(shape, 'csr')
    dns = csr.asnumpy()
    # CSRNDArray or scipy.sparse.csr_matrix with last_batch_handle not equal
    # to 'discard' will throw NotImplementedError
    assertRaises(NotImplementedError, mx.io.NDArrayIter,
                 {'data': csr}, dns, batch_size)
    try:
        import scipy.sparse as spsp
    except ImportError:
        spsp = None
    # BUGFIX: the scipy-dependent checks used to run unconditionally below the
    # try/except, raising NameError on 'train_data' whenever scipy was absent.
    if spsp is not None:
        train_data = spsp.csr_matrix(dns)
        assertRaises(NotImplementedError, mx.io.NDArrayIter,
                     {'data': train_data}, dns, batch_size)
        # scipy.sparse.csr_matrix with shuffle
        csr_iter = iter(mx.io.NDArrayIter({'data': train_data}, dns, batch_size,
                                          shuffle=True, last_batch_handle='discard'))
        csr_iter_empty_list = iter(mx.io.NDArrayIter({'data': train_data}, [], batch_size,
                                                     shuffle=True, last_batch_handle='discard'))
        csr_iter_None = iter(mx.io.NDArrayIter({'data': train_data}, None, batch_size,
                                               shuffle=True, last_batch_handle='discard'))
        _test_NDArrayIter_csr(csr_iter, csr_iter_empty_list,
                              csr_iter_None, num_rows, batch_size)
    # CSRNDArray with shuffle
    csr_iter = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, dns, batch_size,
                                      shuffle=True, last_batch_handle='discard'))
    csr_iter_empty_list = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, [], batch_size,
                                                 shuffle=True, last_batch_handle='discard'))
    csr_iter_None = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, None, batch_size,
                                           shuffle=True, last_batch_handle='discard'))
    _test_NDArrayIter_csr(csr_iter, csr_iter_empty_list,
                          csr_iter_None, num_rows, batch_size)
    # 'discard' iteration: each batch must equal the matching dense slice,
    # wrapping around to the start of the data past the final row.
    csr_iter = iter(mx.io.NDArrayIter(
        csr, csr, batch_size, last_batch_handle='discard'))
    begin = 0
    for batch in csr_iter:
        expected = np.zeros((batch_size, num_cols))
        end = begin + batch_size
        expected[:num_rows - begin] = dns[begin:end]
        if end > num_rows:
            expected[num_rows - begin:] = dns[0:end - num_rows]
        assert_almost_equal(batch.data[0].asnumpy(), expected)
        begin += batch_size
def test_LibSVMIter(tmpdir):
    """LibSVMIter checks: synthetic data, real news20 data, malformed input."""
    def check_libSVMIter_synthetic():
        """Four hand-written rows; iterating wraps and must match `first`/`second`."""
        data_path = os.path.join(str(tmpdir), 'data.t')
        label_path = os.path.join(str(tmpdir), 'label.t')
        with open(data_path, 'w') as fout:
            fout.write('1.0 0:0.5 2:1.2\n')
            fout.write('-2.0\n')
            fout.write('-3.0 0:0.6 1:2.4 2:1.2\n')
            fout.write('4 2:-1.2\n')
        with open(label_path, 'w') as fout:
            fout.write('1.0\n')
            fout.write('-2.0 0:0.125\n')
            fout.write('-3.0 2:1.2\n')
            fout.write('4 1:1.0 2:-1.2\n')
        data_dir = os.path.join(str(tmpdir), 'data')
        data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,
                                      data_shape=(3, ), label_shape=(3, ), batch_size=3)
        # dense equivalents of the first and second (wrapped) batch
        first = mx.nd.array([[0.5, 0., 1.2], [0., 0., 0.], [0.6, 2.4, 1.2]])
        second = mx.nd.array([[0., 0., -1.2], [0.5, 0., 1.2], [0., 0., 0.]])
        i = 0
        for batch in iter(data_train):
            expected = first.asnumpy() if i == 0 else second.asnumpy()
            data = data_train.getdata()
            data.check_format(True)
            assert_almost_equal(data.asnumpy(), expected)
            i += 1
    def check_libSVMIter_news_data():
        """Download news20.t and verify label range plus batches per epoch."""
        news_metadata = {
            'name': 'news20.t',
            'origin_name': 'news20.t.bz2',
            'url': "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/news20.t.bz2",
            'feature_dim': 62060 + 1,
            'num_classes': 20,
            'num_examples': 3993,
        }
        batch_size = 33
        num_examples = news_metadata['num_examples']
        data_dir = os.path.join(str(tmpdir), 'data')
        get_bz2_data(data_dir, news_metadata['name'], news_metadata['url'],
                     news_metadata['origin_name'])
        path = os.path.join(data_dir, news_metadata['name'])
        data_train = mx.io.LibSVMIter(data_libsvm=path, data_shape=(news_metadata['feature_dim'],),
                                      batch_size=batch_size)
        for epoch in range(2):
            num_batches = 0
            for batch in data_train:
                # labels must lie in (0, 20]
                data = batch.data[0]
                label = batch.label[0]
                data.check_format(True)
                assert(np.sum(label.asnumpy() > 20) == 0)
                assert(np.sum(label.asnumpy() <= 0) == 0)
                num_batches += 1
            expected_num_batches = num_examples / batch_size
            assert(num_batches == int(expected_num_batches)), num_batches
            data_train.reset()
    def check_libSVMIter_exception():
        """A negative feature index in the data must make iteration raise."""
        data_path = os.path.join(str(tmpdir), 'data.t')
        label_path = os.path.join(str(tmpdir), 'label.t')
        with open(data_path, 'w') as fout:
            fout.write('1.0 0:0.5 2:1.2\n')
            fout.write('-2.0\n')
            # Below line has a neg indice. Should throw an exception
            fout.write('-3.0 -1:0.6 1:2.4 2:1.2\n')
            fout.write('4 2:-1.2\n')
        with open(label_path, 'w') as fout:
            fout.write('1.0\n')
            fout.write('-2.0 0:0.125\n')
            fout.write('-3.0 2:1.2\n')
            fout.write('4 1:1.0 2:-1.2\n')
        data_dir = os.path.join(str(tmpdir), 'data')
        data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,
                                      data_shape=(3, ), label_shape=(3, ), batch_size=3)
        for batch in iter(data_train):
            data_train.get_data().asnumpy()
    check_libSVMIter_synthetic()
    check_libSVMIter_news_data()
    assertRaises(MXNetError, check_libSVMIter_exception)
def test_DataBatch():
    """DataBatch.__str__ must report data/label shapes.

    The optional 'L' in the regexes tolerates Python 2 long-int reprs.
    """
    from mxnet.io import DataBatch
    import re
    batch = DataBatch(data=[mx.nd.ones((2, 3))])
    assert re.match(
        r'DataBatch: data shapes: \[\(2L?, 3L?\)\] label shapes: None', str(batch))
    batch = DataBatch(data=[mx.nd.ones((2, 3)), mx.nd.ones(
        (7, 8))], label=[mx.nd.ones((4, 5))])
    assert re.match(
        r'DataBatch: data shapes: \[\(2L?, 3L?\), \(7L?, 8L?\)\] label shapes: \[\(4L?, 5L?\)\]', str(batch))
@pytest.mark.skip(reason="https://github.com/apache/incubator-mxnet/issues/18382")
def test_CSVIter(tmpdir):
    """Round-trip synthetic CSV data through mx.io.CSVIter for several dtypes."""
    def check_CSVIter_synthetic(dtype='float32'):
        data_path = os.path.join(str(tmpdir), 'data.t')
        label_path = os.path.join(str(tmpdir), 'label.t')
        # Pick a per-dtype cell value large enough to require that dtype.
        # BUGFIX: compare with == rather than `is`; identity comparison with
        # string literals depends on interning and raises SyntaxWarning on
        # modern CPython.
        entry_str = '1'
        if dtype == 'int32':
            entry_str = '200000001'
        if dtype == 'int64':
            entry_str = '2147483648'
        with open(data_path, 'w') as fout:
            for i in range(1000):
                fout.write(','.join([entry_str for _ in range(8*8)]) + '\n')
        with open(label_path, 'w') as fout:
            for i in range(1000):
                fout.write('0\n')
        data_train = mx.io.CSVIter(data_csv=data_path, data_shape=(8, 8),
                                   label_csv=label_path, batch_size=100, dtype=dtype)
        # every batch must be a constant array of entry_str in the right dtype
        expected = mx.nd.ones((100, 8, 8), dtype=dtype) * int(entry_str)
        for batch in iter(data_train):
            data_batch = data_train.getdata()
            assert_almost_equal(data_batch.asnumpy(), expected.asnumpy())
            assert data_batch.asnumpy().dtype == expected.asnumpy().dtype
    for dtype in ['int32', 'int64', 'float32']:
        check_CSVIter_synthetic(dtype=dtype)
def test_ImageRecordIter_seed_augmentation(tmpdir):
    """Fixing seed_aug must make random augmentation reproducible across iterators."""
    path = str(tmpdir)
    get_cifar10(path)
    seed_aug = 3
    def assert_dataiter_items_equals(dataiter1, dataiter2):
        """
        Asserts that two data iterators have the same number of batches,
        that the batches have the same number of items, and that the items
        are equal.
        """
        for batch1, batch2 in zip_longest(dataiter1, dataiter2):
            # ensure iterators contain the same number of batches
            # zip_longest will return None if one of the iterators has run out of batches
            assert batch1 and batch2, 'The iterators do not contain the same number of batches'
            # ensure batches are of same length
            assert len(batch1.data) == len(batch2.data), 'The returned batches are not of the same length'
            # ensure batch data is the same
            for i in range(0, len(batch1.data)):
                data1 = batch1.data[i].asnumpy().astype(np.uint8)
                data2 = batch2.data[i].asnumpy().astype(np.uint8)
                assert(np.array_equal(data1, data2))
    def assert_dataiter_items_not_equals(dataiter1, dataiter2):
        """
        Asserts that two data iterators have the same number of batches,
        that the batches have the same number of items, and that the items
        are _not_ all equal.
        """
        for batch1, batch2 in zip_longest(dataiter1, dataiter2):
            # ensure iterators contain the same number of batches
            # zip_longest will return None if one of the iterators has run out of batches
            assert batch1 and batch2, 'The iterators do not contain the same number of batches'
            # ensure batches are of same length
            assert len(batch1.data) == len(batch2.data), 'The returned batches are not of the same length'
            # return as soon as any pair of items differs
            for i in range(0, len(batch1.data)):
                data1 = batch1.data[i].asnumpy().astype(np.uint8)
                data2 = batch2.data[i].asnumpy().astype(np.uint8)
                if not np.array_equal(data1, data2):
                    return
        assert False, 'Expected data iterators to be different, but they are the same'
    # check whether to get constant images after fixing seed_aug
    dataiter1 = mx.io.ImageRecordIter(
        path_imgrec=os.path.join(path, 'cifar', 'train.rec'),
        mean_img=os.path.join(path, 'cifar', 'cifar10_mean.bin'),
        shuffle=False,
        data_shape=(3, 28, 28),
        batch_size=3,
        rand_crop=True,
        rand_mirror=True,
        max_random_scale=1.3,
        max_random_illumination=3,
        max_rotate_angle=10,
        random_l=50,
        random_s=40,
        random_h=10,
        max_shear_ratio=2,
        seed_aug=seed_aug)
    dataiter2 = mx.io.ImageRecordIter(
        path_imgrec=os.path.join(path, 'cifar', 'train.rec'),
        mean_img=os.path.join(path, 'cifar', 'cifar10_mean.bin'),
        shuffle=False,
        data_shape=(3, 28, 28),
        batch_size=3,
        rand_crop=True,
        rand_mirror=True,
        max_random_scale=1.3,
        max_random_illumination=3,
        max_rotate_angle=10,
        random_l=50,
        random_s=40,
        random_h=10,
        max_shear_ratio=2,
        seed_aug=seed_aug)
    assert_dataiter_items_equals(dataiter1, dataiter2)
    # check whether to get different images after change seed_aug
    dataiter1.reset()
    dataiter2 = mx.io.ImageRecordIter(
        path_imgrec=os.path.join(path, 'cifar', 'train.rec'),
        mean_img=os.path.join(path, 'cifar', 'cifar10_mean.bin'),
        shuffle=False,
        data_shape=(3, 28, 28),
        batch_size=3,
        rand_crop=True,
        rand_mirror=True,
        max_random_scale=1.3,
        max_random_illumination=3,
        max_rotate_angle=10,
        random_l=50,
        random_s=40,
        random_h=10,
        max_shear_ratio=2,
        seed_aug=seed_aug+1)
    assert_dataiter_items_not_equals(dataiter1, dataiter2)
    # check whether seed_aug changes the iterator behavior (no augmentation here)
    dataiter1 = mx.io.ImageRecordIter(
        path_imgrec=os.path.join(path, 'cifar', 'train.rec'),
        mean_img=os.path.join(path, 'cifar', 'cifar10_mean.bin'),
        shuffle=False,
        data_shape=(3, 28, 28),
        batch_size=3,
        seed_aug=seed_aug)
    dataiter2 = mx.io.ImageRecordIter(
        path_imgrec=os.path.join(path, 'cifar', 'train.rec'),
        mean_img=os.path.join(path, 'cifar', 'cifar10_mean.bin'),
        shuffle=False,
        data_shape=(3, 28, 28),
        batch_size=3,
        seed_aug=seed_aug)
    assert_dataiter_items_equals(dataiter1, dataiter2)
| 38.844021 | 120 | 0.611386 |
d8513c1a383bc19a466c2173693322c29c14d5a0 | 1,536 | py | Python | ietf/utils/migrations/0001_initial.py | unofficial-mirror/ietfdb | ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81 | [
"BSD-3-Clause"
] | null | null | null | ietf/utils/migrations/0001_initial.py | unofficial-mirror/ietfdb | ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81 | [
"BSD-3-Clause"
] | null | null | null | ietf/utils/migrations/0001_initial.py | unofficial-mirror/ietfdb | ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81 | [
"BSD-3-Clause"
] | null | null | null | # Copyright The IETF Trust 2018-2019, All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-20 10:52
from __future__ import absolute_import, print_function, unicode_literals
import six
if six.PY3:
from typing import List, Tuple # pyflakes:ignore
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the DumpInfo and VersionInfo bookkeeping tables."""
    initial = True
    dependencies = [
    ] # type: List[Tuple[str]]
    operations = [
        # DumpInfo: records when/where a database dump was taken.
        migrations.CreateModel(
            name='DumpInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField()),
                ('host', models.CharField(max_length=128)),
                ('tz', models.CharField(default='UTC', max_length=32)),
            ],
        ),
        # VersionInfo: records tool version lookups (command + switch used).
        migrations.CreateModel(
            name='VersionInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.DateTimeField(auto_now=True)),
                ('command', models.CharField(max_length=32)),
                ('switch', models.CharField(max_length=16)),
                ('version', models.CharField(max_length=64)),
                ('used', models.BooleanField(default=True)),
            ],
            options={
                'verbose_name_plural': 'VersionInfo',
            },
        ),
    ]
| 32.680851 | 114 | 0.561198 |
605e1cb8b35de157934f7e20498fad4884e66c29 | 1,098 | py | Python | hpat/__init__.py | AlexanderKalistratov/hpat | be1c9cdbd26c55162bad4bb6dfe77af176584d40 | [
"BSD-2-Clause"
] | 1 | 2022-02-21T06:49:03.000Z | 2022-02-21T06:49:03.000Z | hpat/__init__.py | kozlov-alexey/sdc | f1a48b3388713da2f96719d7003e7a400953f21e | [
"BSD-2-Clause"
] | 2 | 2019-10-11T16:49:03.000Z | 2019-10-14T22:05:50.000Z | hpat/__init__.py | kozlov-alexey/sdc | f1a48b3388713da2f96719d7003e7a400953f21e | [
"BSD-2-Clause"
] | null | null | null | from ._version import get_versions
import numba
# re-export from Numba
from numba import (typeof, prange, pndindex, gdb, gdb_breakpoint, gdb_init,
stencil, threading_layer, jitclass, objmode)
from numba.types import *
import hpat.dict_ext
import hpat.set_ext
from hpat.set_ext import init_set_string
import hpat.distributed_api
from hpat.distributed_api import dist_time
# legacy for STAC A3, TODO: remove
from hpat.dict_ext import (DictIntInt, DictInt32Int32, dict_int_int_type,
dict_int32_int32_type)
from hpat.str_ext import string_type
from hpat.str_arr_ext import string_array_type
from numba.types import List
from hpat.utils import cprint, distribution_report
import hpat.compiler
import hpat.io
import hpat.io.np_io
import hpat.hiframes.pd_timestamp_ext
import hpat.hiframes.boxing
import hpat.config
import hpat.timsort
from hpat.decorators import jit
# Xenon I/O bindings are optional; only re-export them when compiled in.
if hpat.config._has_xenon:
    from hpat.io.xenon_ext import read_xenon, xe_connect, xe_open, xe_close
# Module-level flag, off by default -- presumably consulted by the compiler
# pipeline; confirm before relying on it.
multithread_mode = False
# Resolve the package version once, then drop the helper from the namespace.
__version__ = get_versions()['version']
del get_versions
| 28.894737 | 75 | 0.795082 |
127a9f6d62eac1129f3ad06c7238e8a29b8b8aef | 2,214 | py | Python | Tool_FoodChooser/fc1.0.py | yo1995/Daily_Python_Tasks | 211255deead5023cdcea1db4f49a53eedfe762b6 | [
"MIT"
] | 14 | 2018-05-21T05:12:25.000Z | 2021-11-28T14:49:55.000Z | Tool_FoodChooser/fc1.0.py | yo1995/Daily_Python_Tasks | 211255deead5023cdcea1db4f49a53eedfe762b6 | [
"MIT"
] | 2 | 2018-11-28T20:59:37.000Z | 2021-07-27T22:39:33.000Z | Tool_FoodChooser/fc1.0.py | yo1995/Daily_Python_Tasks | 211255deead5023cdcea1db4f49a53eedfe762b6 | [
"MIT"
] | 6 | 2019-03-21T01:07:57.000Z | 2021-03-29T03:24:33.000Z | # -*- coding: utf-8 -*-
import time
import random
def read_food_list():
    """Parse list.txt into [[meal_name, [[weight, item], ...]], ...] entries.

    Only lines containing '!' are treated as meal definitions. The final two
    ';'-separated fields of each line are discarded (trailing separator and
    newline in the expected file format).
    """
    with open('list.txt', 'r', encoding='UTF-8') as f:
        raw_lines = f.readlines()
    meals = []
    for raw in raw_lines:
        if '!' not in raw:
            continue
        meal_name, meal_items = raw.split('!')
        entries = [field.split(':') for field in meal_items.split(';')[:-2]]
        meals.append([meal_name, entries])
    return meals
# def time_later_than(time1, time2):
# #return (time1[0] > time2[0] or time1[0] == time2[0] and time1[1] >= time2[1] )
# return 60*time1[0]+time1[1] > 60*time2[0]+time2[1]
def time_in_range(t, r):
    """Return True when t lies within the inclusive interval r = [start, end]."""
    lower = r[0]
    upper = r[1]
    return lower <= t <= upper
def choose_random_food(l):
    """Pick one item name at random from l, a list of [weight, name] pairs.

    Each entry's first field is its selection weight (parseable as float);
    heavier entries are proportionally more likely to be returned.
    Raises ValueError if the weights sum to zero (the previous hand-rolled
    scan silently returned the last item in that case).
    """
    names = [entry[1] for entry in l]
    weights = [float(entry[0]) for entry in l]
    # random.choices implements the weighted draw correctly; the previous
    # cumulative scan mis-picked the LAST item whenever the uniform draw
    # landed exactly on 0.
    return random.choices(names, weights=weights, k=1)[0]
def main():
    """Greet with the current time and recommend a dish for the active meal slot."""
    # main function
    current_time = time.localtime(time.time())
    minute_number = current_time[4]
    # zero-pad single-digit minutes for display
    minute_string = str(minute_number) if minute_number > 9 else '0' + str(minute_number)
    print("Hello, it's " + str(current_time[3]) + ':' + minute_string + ' now.')
    # Encode clock time as hour.minute decimal (e.g. 10:50 -> 10.50).
    # NOTE(review): comparing these decimals works only because minutes < 60
    # keeps the encoding monotonic within an hour -- confirm intended.
    ct = current_time[3] + current_time[4]/100.0
    meal_sort = ''
    # Meal windows, in the same hour.minute encoding; index order must match
    # the meal order in list.txt.
    time_ranges = [[7.30,10], [10.50, 13.30], [16.30, 18.50],[19, 21.2], [21.21, 22.20]]
    meal_index = -1
    for time_range in time_ranges:
        meal_index += 1
        if time_in_range(ct, time_range):
            meal_sort = time_range[0]
            break
    if meal_sort:
        meal_list = read_food_list()
        choice_list = meal_list[meal_index]
        # print "You can have " + meal_sort + choice_list[0].decode('utf-8')
        print("You can have " + choice_list[0])
        # choice_list[1:][0] is the weighted item list for this meal
        food = choose_random_food(choice_list[1:][0])
        print(food + ' is recommended.')
    else:
        print("NOT MEAL TIME NOW")
if __name__ == '__main__':
    # Script entry point: recommend a dish for the current local time.
    main()
| 30.328767 | 89 | 0.563686 |
551cd98945d3dcc74daa7fc488a3ed4011c36a6d | 1,456 | py | Python | pycontrolsystem/Client/DataLogger.py | DanielWinklehner/pycontrolsystem | b34ee9f463bf1d37e3892a00d313efe0708cc4c7 | [
"MIT"
] | 1 | 2019-05-19T23:50:36.000Z | 2019-05-19T23:50:36.000Z | pycontrolsystem/Client/DataLogger.py | DanielWinklehner/pycontrolsystem | b34ee9f463bf1d37e3892a00d313efe0708cc4c7 | [
"MIT"
] | null | null | null | pycontrolsystem/Client/DataLogger.py | DanielWinklehner/pycontrolsystem | b34ee9f463bf1d37e3892a00d313efe0708cc4c7 | [
"MIT"
] | null | null | null | import h5py
import numpy as np
import time
import os
class DataLogger(object):
    """Append timestamped channel readings to an HDF5 log file.

    Layout: /mist1_control_system/<device>/<channel> datasets of shape (N, 2),
    each row holding (timestamp, value).
    """

    def __init__(self, filename):
        # Target HDF5 path; the file itself is only created by initialize().
        self._h5fn = filename
        self._h5file = None
        self._data_set = {}  # full dataset name -> h5py dataset handle
        self._main_group = None

    def initialize(self):
        """Create (truncating) the HDF5 file and its root group."""
        parent_dir = os.path.dirname(self._h5fn)
        # BUGFIX: only create the parent directory when the path has one;
        # os.makedirs('') raises for bare filenames. exist_ok=True also
        # removes the exists()/makedirs() race of the previous version.
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)
        self._h5file = h5py.File(self._h5fn, "w")
        self._main_group = self._h5file.create_group("mist1_control_system")

    def add_device(self, dev_name):
        """Ensure a group for dev_name exists under the root group."""
        if dev_name not in self._main_group.keys():
            self._main_group.create_group(dev_name)

    def add_channel(self, dev_name, ch_name):
        """Ensure a growable (N, 2) float dataset exists for the channel."""
        self.add_device(dev_name)
        if ch_name not in self._main_group[dev_name].keys():
            dset = self._main_group[dev_name].create_dataset(ch_name, (1, 2), maxshape=(None, 2),
                                                             dtype=float, compression="gzip")
            self._data_set[dset.name] = dset

    def log_value(self, dev_name, ch_name, ch_value, timestamp):
        """Append one (timestamp, ch_value) row to the channel; None values are ignored."""
        self.add_channel(dev_name, ch_name)
        if ch_value is not None:
            dataset_name = "{}/{}/{}".format(self._main_group.name, dev_name, ch_name)
            dset = self._data_set[dataset_name]
            # NOTE(review): the initial (1, 2) row created by add_channel stays
            # all-zero; readers must skip it -- confirm downstream tooling expects this.
            dset.resize((len(dset) + 1, 2))
            dset[len(dset) - 1] = (timestamp, ch_value)
            self._h5file.flush()
| 30.978723 | 97 | 0.599588 |
84b72344b6a97213a074567d021c12d87547736a | 1,067 | py | Python | mindarmour/reliability/__init__.py | hboshnak/mindarmour | 0609a4eaea875a84667bed279add9305752880cc | [
"Apache-2.0"
] | null | null | null | mindarmour/reliability/__init__.py | hboshnak/mindarmour | 0609a4eaea875a84667bed279add9305752880cc | [
"Apache-2.0"
] | null | null | null | mindarmour/reliability/__init__.py | hboshnak/mindarmour | 0609a4eaea875a84667bed279add9305752880cc | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reliability methods of MindArmour.
"""
from .model_fault_injection.fault_injection import FaultInjector
from .concept_drift.concept_drift_check_time_series import ConceptDriftCheckTimeSeries
from .concept_drift.concept_drift_check_images import OodDetector
from .concept_drift.concept_drift_check_images import OodDetectorFeatureCluster
# Public API of the mindarmour.reliability subpackage.
__all__ = ['FaultInjector',
           'ConceptDriftCheckTimeSeries',
           'OodDetector',
           'OodDetectorFeatureCluster']
| 39.518519 | 86 | 0.786317 |
fd0795df512b05f96e059d9b8c325fd2d8fd104b | 16,443 | py | Python | Lib/gftools/utils.py | moontypespace/gftools | 9ff6932eb887e4c1e05dd94107aa0c3438ae26a9 | [
"Apache-2.0"
] | 24 | 2017-10-11T09:08:16.000Z | 2018-06-09T14:38:03.000Z | Lib/gftools/utils.py | moontypespace/gftools | 9ff6932eb887e4c1e05dd94107aa0c3438ae26a9 | [
"Apache-2.0"
] | 54 | 2017-10-11T10:09:46.000Z | 2018-06-08T09:54:55.000Z | Lib/gftools/utils.py | moontypespace/gftools | 9ff6932eb887e4c1e05dd94107aa0c3438ae26a9 | [
"Apache-2.0"
] | 10 | 2017-10-11T09:16:00.000Z | 2018-05-23T19:46:07.000Z | #!/usr/bin/env python3
# Copyright 2016 The Fontbakery Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
from io import BytesIO
from zipfile import ZipFile
import sys
import os
import shutil
import unicodedata
from unidecode import unidecode
from collections import namedtuple
from gflanguages import LoadLanguages
from github import Github
from pkg_resources import resource_filename
from google.protobuf import text_format
import json
from PIL import Image
if sys.version_info[0] == 3:
from configparser import ConfigParser
else:
from ConfigParser import ConfigParser
# =====================================
# HELPER FUNCTIONS
def download_family_from_Google_Fonts(family, dst=None):
    """Download a font family's fonts from Google Fonts.

    Args:
        family: family name as listed on Google Fonts, e.g. "Open Sans"
        dst: optional directory to extract the fonts into. If omitted, the
            fonts are returned as in-memory BytesIO objects.

    Returns:
        list of extracted file paths (dst given) or BytesIO objects.
    """
    encoded_family = family.replace(' ', '%20')
    url = 'https://fonts.google.com/download?family={}'.format(encoded_family)
    archive = ZipFile(download_file(url))
    if not dst:
        return fonts_from_zip(archive)
    extracted = fonts_from_zip(archive, dst)
    # Drop the "static" instances shipped alongside a variable font
    return [path for path in extracted if "static" not in path]
def Google_Fonts_has_family(name):
    """Return True if Google Fonts hosts the specified font family."""
    # This endpoint is private and may change at some point
    # TODO (MF) if another function needs this data, refactor it into a
    # function and use a lru cache
    metadata = requests.get("https://fonts.google.com/metadata/fonts").json()
    known_families = {entry["family"] for entry in metadata["familyMetadataList"]}
    return name in known_families
def load_Google_Fonts_api_key():
    """Read the Google Fonts API key from ``~/.gf-api-key``.

    Returns:
        The first value in the [Credentials] section, or None if the config
        file does not exist.
    """
    config_filepath = os.path.expanduser("~/.gf-api-key")
    if not os.path.isfile(config_filepath):
        return None
    config = ConfigParser()
    config.read(config_filepath)
    return config.items("Credentials")[0][1]
def parse_github_pr_url(url):
    """Parse a GitHub pull request URL into its components.

    Args:
        url: e.g. "https://github.com/google/fonts/pull/2072"

    Returns:
        GithubPR namedtuple with fields ``user``, ``repo``, ``pull`` (int).

    Raises:
        ValueError: if the url is not a github.com pull request url.
    """
    # Bug fix: the original combined the checks with "and", so a url only
    # had to fail BOTH tests to be rejected (e.g. a non-github url
    # containing "pull" slipped through). Reject if EITHER check fails.
    if "github.com" not in url or "pull" not in url:
        raise ValueError("{} is not a github.com pr url".format(url))
    if not url[-1].isdigit():
        raise ValueError("{} should end with a pull request number".format(url))
    segments = url.split("/")
    GithubPR = namedtuple("GithubPR", "user repo pull")
    return GithubPR(segments[3], segments[4], int(segments[-1]))
def parse_github_dir_url(url):
    """Parse a GitHub tree URL into (user, repo, branch, dir).

    Example input: "https://github.com/google/fonts/tree/main/ofl/abhayalibre"
    """
    if "github.com" not in url:
        raise ValueError("{} is not a github.com dir url".format(url))
    GithubDir = namedtuple("GithubDir", "user repo branch dir")
    parts = url.split("/")
    return GithubDir(parts[3], parts[4], parts[6], "/".join(parts[7:]))
def download_files_in_github_pr(
    url,
    dst,
    filter_files=[],  # NOTE(review): mutable default; only read, never mutated, so safe
    ignore_static_dir=True,
    overwrite=True,
):
    """Download files in a github pr e.g
    https://github.com/google/fonts/pull/2072
    Requires the ``GH_TOKEN`` environment variable (GitHub API token).
    Arguments
    ---------
    url: str, github pr url
    dst: str, path to output files
    filter_files: list, collection of files to include. None will keep all.
    ignore_static_dir: bool, If true, do not include files which reside in
    a /static dir. These dirs are used in family dirs on google/fonts
    e.g ofl/oswald.
    overwrite: bool, set True to overwrite existing contents in dst
    Returns
    -------
    list of paths to downloaded files
    """
    gh = Github(os.environ["GH_TOKEN"])
    url = parse_github_pr_url(url)
    repo_slug = "{}/{}".format(url.user, url.repo)
    repo = gh.get_repo(repo_slug)
    pull = repo.get_pull(url.pull)
    files = [f for f in pull.get_files()]
    mkdir(dst, overwrite=overwrite)
    # if the pr is from google/fonts or a fork of it, download all the
    # files inside the family dir as well. This way means we can qa
    # the whole family together as a whole unit. It will also download
    # the metadata, license and description files so all Fontbakery
    # checks will be executed.
    if pull.base.repo.name == "fonts":
        dirs = set([os.path.dirname(p.filename) for p in files])
        results = []
        for d in dirs:
            if ignore_static_dir and '/static' in d:
                continue
            # Build a tree url pointing at the PR's head branch so the
            # whole family dir is fetched, not just the changed files.
            url = os.path.join(
                pull.head.repo.html_url,
                "tree",
                pull.head.ref,  # head branch
                d)
            results += download_files_in_github_dir(url, dst, overwrite=False)
        return results
    results = []
    for f in files:
        filename = os.path.join(dst, f.filename)
        if filter_files and not filename.endswith(tuple(filter_files)):
            continue
        if ignore_static_dir and "/static" in filename:
            continue
        if not overwrite and os.path.exists(filename):
            continue
        dst_ = os.path.dirname(filename)
        mkdir(dst_, overwrite=False)
        download_file(f.raw_url, filename)
        results.append(filename)
    return results
def download_files_in_github_dir(
    url,
    dst,
    filter_files=[],  # NOTE(review): mutable default; only read, never mutated, so safe
    overwrite=True
):
    """Download files in a github dir e.g
    https://github.com/google/fonts/tree/main/ofl/abhayalibre
    Requires the ``GH_TOKEN`` environment variable (GitHub API token).
    Arguments
    ---------
    url: str, github dir url
    dst: str, path to output files
    filter_files: list, collection of files to include. None will keep all.
    overwrite: bool, set True to overwrite existing contents in dst
    Returns
    -------
    list of paths to downloaded files
    """
    gh = Github(os.environ["GH_TOKEN"])
    url = parse_github_dir_url(url)
    repo_slug = "{}/{}".format(url.user, url.repo)
    repo = gh.get_repo(repo_slug)
    # Only direct files; subdirectories are not recursed into.
    files = [f for f in repo.get_contents(url.dir, ref=url.branch)
             if f.type == 'file']
    mkdir(dst, overwrite=overwrite)
    results = []
    for f in files:
        filename = os.path.join(dst, f.path)
        if filter_files and not filename.endswith(tuple(filter_files)):
            continue
        if not overwrite and os.path.exists(filename):
            continue
        dst_ = os.path.dirname(filename)
        mkdir(dst_, overwrite=False)
        download_file(f.download_url, filename)
        results.append(filename)
    return results
def download_files_from_archive(url, dst):
    """Download a zip archive from ``url`` and extract its fonts into ``dst``.

    Returns the list of extracted font paths.
    """
    archive_bytes = download_file(url)
    with ZipFile(archive_bytes) as archive:
        return fonts_from_zip(archive, dst)
def download_file(url, dst_path=None):
    """Download a file from a url.

    If ``dst_path`` is falsy, the content is returned as a BytesIO object;
    otherwise it is written to ``dst_path`` and None is returned.
    """
    response = requests.get(url, stream=True)
    if not dst_path:
        return BytesIO(response.content)
    with open(dst_path, 'wb') as out_file:
        out_file.write(response.content)
def fonts_from_zip(zipfile, dst=None):
    """Extract the .ttf members of an open ZipFile.

    If ``dst`` is given, files are extracted there and their paths returned;
    otherwise the font data is returned as BytesIO objects.
    """
    results = []
    for member in zipfile.namelist():
        if not member.endswith(".ttf"):
            continue
        if dst:
            zipfile.extract(member, dst)
            results.append(os.path.join(dst, member))
        else:
            results.append(BytesIO(zipfile.read(member)))
    return results
def cmp(x, y):
    """
    Replacement for built-in function cmp that was removed in Python 3
    Compare the two objects x and y and return an integer according to
    the outcome. The return value is negative if x < y, zero if x == y
    and strictly positive if x > y.
    """
    if x < y:
        return -1
    if x > y:
        return 1
    return 0
def mkdir(path, overwrite=True):
    """Create ``path`` (including parents) and return it.

    When ``overwrite`` is True an existing directory is removed first, so
    the caller always gets an empty directory; otherwise existing contents
    are kept.
    """
    exists = os.path.isdir(path)
    if exists and overwrite:
        shutil.rmtree(path)
        exists = False
    if not exists:
        os.makedirs(path)
    return path
## Font-related utility functions
def font_stylename(ttFont):
    """Get a font's stylename using the name table. Since our fonts use the
    RIBBI naming model, use the Typographic SubFamily Name (NAmeID 17) if it
    exists, otherwise use the SubFamily Name (NameID 2).
    Args:
        ttFont: a TTFont instance
    Returns:
        str: the decoded stylename record
    """
    return get_name_record(ttFont, 17, fallbackID=2)
def font_familyname(ttFont):
    """Get a font's familyname using the name table. since our fonts use the
    RIBBI naming model, use the Typographic Family Name (NameID 16) if it
    exists, otherwise use the Family Name (Name ID 1).
    Args:
        ttFont: a TTFont instance
    Returns:
        str: the decoded family name record
    """
    return get_name_record(ttFont, 16, fallbackID=1)
def get_name_record(ttFont, nameID, fallbackID=None, platform=(3, 1, 0x409)):
    """Return a name table record which has the specified nameID.
    Args:
        ttFont: a TTFont instance
        nameID: nameID of name record to return,
        fallbackID: if nameID doesn't exist, use this nameID instead
        platform: (platformID, platEncID, langID) of name record.
            Default is Win US English
    Returns:
        str
    Raises:
        ValueError: if neither nameID nor fallbackID can be found
    """
    name = ttFont["name"]
    # Bug fix: the ``platform`` argument was previously ignored — the
    # Windows US English triple was hard-coded in both lookups.
    record = name.getName(nameID, *platform)
    if not record and fallbackID:
        record = name.getName(fallbackID, *platform)
    if not record:
        raise ValueError(f"Cannot find record with nameID {nameID}")
    return record.toUnicode()
def family_bounding_box(ttFonts):
    """Return the (yMin, yMax) vertical bounds across a collection of fonts."""
    head_tables = [ttFont["head"] for ttFont in ttFonts]
    lowest = min(head.yMin for head in head_tables)
    highest = max(head.yMax for head in head_tables)
    return lowest, highest
def typo_metrics_enabled(ttFont):
    """Return True if the OS/2 fsSelection USE_TYPO_METRICS bit (bit 7) is set."""
    use_typo_metrics = ttFont["OS/2"].fsSelection & (1 << 7)
    return bool(use_typo_metrics)
def family_is_vf(ttFonts):
    """Return True when every font in the family is variable (has an fvar).

    Raises:
        ValueError: if the family mixes static and variable fonts.
    """
    fvar_flags = ["fvar" in ttFont for ttFont in ttFonts]
    if not any(fvar_flags):
        return False
    if all(fvar_flags):
        return True
    raise ValueError("Families cannot contain both static and variable fonts")
def validate_family(ttFonts):
    """Sanity-check a collection of fonts as a single family.

    Raises ValueError when fonts mix static/variable or span multiple
    family names; returns True otherwise.
    """
    family_is_vf(ttFonts)
    family_names = {font_familyname(ttFont) for ttFont in ttFonts}
    if len(family_names) != 1:
        raise ValueError(f"Multiple families found {family_names}")
    return True
def unique_name(ttFont, nameids):
    """Build a Unique Font Identifier string: "version;vendorID;psName"."""
    version = _font_version(ttFont)
    vendor_id = ttFont["OS/2"].achVendID.strip()
    postscript_name = nameids[6]
    return "{};{};{}".format(version, vendor_id, postscript_name)
def _font_version(font, platEncLang=(3, 1, 0x409)):
    """Return a font's version number as a string.

    Reads the version name record (nameID 5); falls back to
    head.fontRevision when no such record exists.
    """
    nameRecord = font["name"].getName(5, *platEncLang)
    if nameRecord is None:
        return f'{font["head"].fontRevision:.3f}'
    # "Version 1.101; ttfautohint (v1.8.1.43-b0c9)" --> "1.101"
    # Also works fine with inputs "Version 1.101" or "1.101" etc
    versionNumber = nameRecord.toUnicode().split(";")[0]
    # Bug fix: str.lstrip("Version ") strips any of the characters
    # {V,e,r,s,i,o,n,space} individually, so a version record beginning
    # with one of those characters (e.g. "nightly 1.0") was mangled.
    # Strip the literal "Version" prefix instead.
    if versionNumber.startswith("Version"):
        versionNumber = versionNumber[len("Version"):]
    return versionNumber.strip()
def partition_cmap(font, test, report=True):
    """Drops all cmap tables from the font which do not pass the supplied test.
    Arguments:
      font: A ``TTFont`` instance
      test: A function which takes a cmap table and returns True if it should
      be kept or False if it should be removed from the font.
      report: Reports to stdout which tables were dropped and which were kept.
    Returns two lists: a list of `fontTools.ttLib.tables._c_m_a_p.*` objects
    which were kept in the font, and a list of those which were removed.

    Note: mutates ``font`` in place by replacing its cmap subtable list.
    """
    keep = []
    drop = []
    # Partition the subtables according to the predicate.
    for index, table in enumerate(font['cmap'].tables):
        if test(table):
            keep.append(table)
        else:
            drop.append(table)
    if report:
        for table in keep:
            print(("Keeping format {} cmap subtable with Platform ID = {}"
                   " and Encoding ID = {}").format(table.format,
                                                   table.platformID,
                                                   table.platEncID))
        for table in drop:
            print(("--- Removed format {} cmap subtable with Platform ID = {}"
                   " and Encoding ID = {} ---").format(table.format,
                                                       table.platformID,
                                                       table.platEncID))
    font['cmap'].tables = keep
    return keep, drop
def _unicode_marks(string):
    """Yield (mark, ascii replacement) pairs for marks present in ``string``."""
    replacements = [(u'©', '(c)'), (u'®', '(r)'), (u'™', '(tm)')]
    return filter(lambda pair: pair[0] in string, replacements)
def normalize_unicode_marks(string):
    """ Converts special characters like copyright,
    trademark signs to ascii name """
    original = string
    replaced = string
    for mark, ascii_repl in _unicode_marks(string):
        replaced = replaced.replace(mark, ascii_repl)
    # NFKC-normalize, then transliterate any remaining non-ascii characters.
    normalized = unicodedata.normalize('NFKC', replaced)
    result = unidecode(normalized.strip())
    if result != original:
        print("Fixed string: '{}'".format(result))
    return result
def get_fsSelection_byte2(ttfont):
    """Return the high byte of the OS/2 fsSelection field."""
    fs_selection = ttfont['OS/2'].fsSelection
    return fs_selection >> 8
def get_fsSelection_byte1(ttfont):
    """Return the low byte of the OS/2 fsSelection field."""
    fs_selection = ttfont['OS/2'].fsSelection
    return fs_selection & 255
def get_encoded_glyphs(ttFont):
    """Return every encoded character of the font as a list of 1-char strings."""
    codepoints = ttFont.getBestCmap().keys()
    return [chr(codepoint) for codepoint in codepoints]
def get_unencoded_glyphs(font):
    """Return glyph names which are not mapped by any Unicode cmap subtable.

    Prefers the Windows UCS-4 subtable (3, 10), then falls back through the
    common UCS-2 subtables. Returns [] if no suitable subtable exists.
    '.notdef' is never reported.
    """
    cmap = font['cmap']
    new_cmap = cmap.getcmap(3, 10)
    if not new_cmap:
        for platform_id, encoding_id in ((3, 1), (0, 3), (3, 0)):
            new_cmap = cmap.getcmap(platform_id, encoding_id)
            if new_cmap:
                break
    if not new_cmap:
        return []
    # Fix: the original removed '.notdef' here and then filtered it out a
    # second time over a pointless slice copy of the list — the set
    # difference alone already produces the final result.
    return list(set(font.getGlyphOrder()) -
                set(new_cmap.cmap.values()) - {'.notdef'})
def has_mac_names(ttfont):
    """Check if a font has Mac names. Mac names have the following
    field values:
    platformID: 1, encodingID: 0, LanguageID: 0"""
    name_table = ttfont['name']
    return any(name_table.getName(name_id, 1, 0, 0) for name_id in range(255))
def font_is_italic(ttfont):
    """Check if the font has the word "Italic" in its stylename."""
    subfamily = ttfont["name"].getName(2, 3, 1, 0x409).toUnicode()
    return "Italic" in subfamily
def font_sample_text(ttFont):
    """Collect words which exist in the Universal Declaration of Human Rights
    that can be formed using the ttFont instance.
    UDHR has been chosen due to the many languages it covers.

    Falls back to gflanguages sample texts when no UDHR word is fully
    covered by the font's cmap. Returns a list of words; later words only
    appear if they introduce at least one character not seen before.
    """
    with open(resource_filename("gftools", "udhr_all.txt")) as doc:
        uhdr = doc.read()
    cmap = set(ttFont.getBestCmap())
    words = []
    seen_chars = set()
    # Append words whose characters are all encoded and which contribute at
    # least one character not already covered by an earlier word.
    def _add_words(words, text, seen_chars):
        for word in text.split():
            chars = set(ord(l) for l in word)
            if not chars.issubset(cmap):
                continue
            if chars & seen_chars == chars:
                continue
            seen_chars |= chars
            words.append(word)
    _add_words(words, uhdr, seen_chars)
    if not words:
        languages = LoadLanguages()
        for file, proto in languages.items():
            if hasattr(proto, "sample_text"):
                for key, text in proto.sample_text.ListFields():
                    _add_words(words, text, seen_chars)
    return words
def gen_gifs(dir1, dir2, dst_dir):
    """Generate a flicker gif in ``dst_dir`` for every jpg/png filename that
    exists in both ``dir1`` and ``dir2``."""
    image_exts = ("jpg", "png")
    imgs_a = {f for f in os.listdir(dir1) if f.endswith(image_exts)}
    imgs_b = {f for f in os.listdir(dir2) if f.endswith(image_exts)}
    for img in imgs_a & imgs_b:
        dst = os.path.join(dst_dir, img[:-4] + '.gif')
        gen_gif(os.path.join(dir1, img), os.path.join(dir2, img), dst)
def gen_gif(img_a_path, img_b_path, dst):
    """Write a two-frame flicker gif to ``dst`` (1s per frame, 10000 loops)."""
    with Image.open(img_a_path) as frame_a, Image.open(img_b_path) as frame_b:
        frame_a.save(
            dst,
            save_all=True,
            append_images=[frame_b],
            loop=10000,
            duration=1000
        )
def partition(items, size):
    """partition([1,2,3,4,5,6], 2) --> [[1,2],[3,4],[5,6]]"""
    chunks = []
    for start in range(0, len(items), size):
        chunks.append(items[start:start + size])
    return chunks
def read_proto(fp, schema):
    """Parse the text-format protobuf at path ``fp`` into ``schema`` and
    return the populated message."""
    with open(fp, "rb") as proto_file:
        contents = proto_file.read()
    return text_format.Parse(contents, schema)
| 31.866279 | 83 | 0.639239 |
36e7700f6876df171ca06124a37eab3a4874c3f2 | 21,247 | py | Python | wagtail/wagtailimages/models.py | chrxr/wagtail | 9038da9fcd69f0b39121cc54a72f4a6ae6beb06a | [
"BSD-3-Clause"
] | 1 | 2016-12-28T11:51:15.000Z | 2016-12-28T11:51:15.000Z | wagtail/wagtailimages/models.py | chrxr/wagtail | 9038da9fcd69f0b39121cc54a72f4a6ae6beb06a | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailimages/models.py | chrxr/wagtail | 9038da9fcd69f0b39121cc54a72f4a6ae6beb06a | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, unicode_literals
import hashlib
import inspect
import os.path
import warnings
from collections import OrderedDict
from contextlib import contextmanager
import django
from django.conf import settings
from django.core import checks
from django.core.files import File
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_delete, pre_save
from django.db.utils import DatabaseError
from django.dispatch.dispatcher import receiver
from django.forms.widgets import flatatt
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.six import BytesIO, string_types, text_type
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
from unidecode import unidecode
from willow.image import Image as WillowImage
from wagtail.utils.deprecation import RemovedInWagtail19Warning, RemovedInWagtail110Warning
from wagtail.wagtailadmin.utils import get_object_usage
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import CollectionMember
from wagtail.wagtailimages.exceptions import InvalidFilterSpecError
from wagtail.wagtailimages.rect import Rect
from wagtail.wagtailsearch import index
from wagtail.wagtailsearch.queryset import SearchableQuerySetMixin
class SourceImageIOError(IOError):
    """
    Custom exception to distinguish IOErrors that were thrown while opening the source image
    """
    # Raised by AbstractImage.get_willow_image so callers can tell a missing
    # or unreadable source file apart from IOErrors raised elsewhere.
    pass
class ImageQuerySet(SearchableQuerySetMixin, models.QuerySet):
    # Default queryset for images; the mixin adds search() support.
    pass
def get_image_model():
    """Deprecated shim: use ``wagtail.wagtailimages.get_image_model``."""
    warnings.warn("wagtail.wagtailimages.models.get_image_model "
                  "has been moved to wagtail.wagtailimages.get_image_model",
                  RemovedInWagtail110Warning)
    from wagtail.wagtailimages import get_image_model
    return get_image_model()
def get_upload_to(instance, filename):
    """
    Obtain a valid upload path for an image file.
    This needs to be a module-level function so that it can be referenced within migrations,
    but simply delegates to the `get_upload_to` method of the instance, so that AbstractImage
    subclasses can override it.

    Returns a storage-relative path string.
    """
    return instance.get_upload_to(filename)
def get_rendition_upload_to(instance, filename):
    """
    Obtain a valid upload path for an image rendition file.
    This needs to be a module-level function so that it can be referenced within migrations,
    but simply delegates to the `get_upload_to` method of the instance, so that AbstractRendition
    subclasses can override it.

    Returns a storage-relative path string.
    """
    return instance.get_upload_to(filename)
@python_2_unicode_compatible
class AbstractImage(CollectionMember, index.Indexed, models.Model):
    """
    Abstract base model for images: stores the uploaded file plus metadata
    (dimensions, tags, uploader, optional focal point) and produces
    renditions via ``get_rendition``.
    """
    title = models.CharField(max_length=255, verbose_name=_('title'))
    file = models.ImageField(
        verbose_name=_('file'), upload_to=get_upload_to, width_field='width', height_field='height'
    )
    width = models.IntegerField(verbose_name=_('width'), editable=False)
    height = models.IntegerField(verbose_name=_('height'), editable=False)
    created_at = models.DateTimeField(verbose_name=_('created at'), auto_now_add=True, db_index=True)
    uploaded_by_user = models.ForeignKey(
        settings.AUTH_USER_MODEL, verbose_name=_('uploaded by user'),
        null=True, blank=True, editable=False, on_delete=models.SET_NULL
    )
    tags = TaggableManager(help_text=None, blank=True, verbose_name=_('tags'))
    # Optional rectangle marking the most important region of the image;
    # all four fields must be set for a focal point to exist.
    focal_point_x = models.PositiveIntegerField(null=True, blank=True)
    focal_point_y = models.PositiveIntegerField(null=True, blank=True)
    focal_point_width = models.PositiveIntegerField(null=True, blank=True)
    focal_point_height = models.PositiveIntegerField(null=True, blank=True)
    # Cached file size in bytes; populated lazily by get_file_size().
    file_size = models.PositiveIntegerField(null=True, editable=False)
    objects = ImageQuerySet.as_manager()
    def is_stored_locally(self):
        """
        Returns True if the image is hosted on the local filesystem
        """
        try:
            self.file.path
            return True
        except NotImplementedError:
            return False
    def get_file_size(self):
        """Return the file size in bytes, caching it on first access.

        Returns None (without caching) when the underlying file is missing.
        """
        if self.file_size is None:
            try:
                self.file_size = self.file.size
            except OSError:
                # File doesn't exist
                return
            self.save(update_fields=['file_size'])
        return self.file_size
    def get_upload_to(self, filename):
        """Build the storage path for an uploaded original image."""
        folder_name = 'original_images'
        filename = self.file.field.storage.get_valid_name(filename)
        # do a unidecode in the filename and then
        # replace non-ascii characters in filename with _ , to sidestep issues with filesystem encoding
        filename = "".join((i if ord(i) < 128 else '_') for i in unidecode(filename))
        # Truncate filename so it fits in the 100 character limit
        # https://code.djangoproject.com/ticket/9893
        full_path = os.path.join(folder_name, filename)
        if len(full_path) >= 95:
            chars_to_trim = len(full_path) - 94
            prefix, extension = os.path.splitext(filename)
            filename = prefix[:-chars_to_trim] + extension
            full_path = os.path.join(folder_name, filename)
        return full_path
    def get_usage(self):
        """Return the objects that reference this image."""
        return get_object_usage(self)
    @property
    def usage_url(self):
        # Admin view listing all usages of this image.
        return reverse('wagtailimages:image_usage',
                       args=(self.id,))
    search_fields = CollectionMember.search_fields + [
        index.SearchField('title', partial_match=True, boost=10),
        index.RelatedFields('tags', [
            index.SearchField('name', partial_match=True, boost=10),
        ]),
        index.FilterField('uploaded_by_user'),
    ]
    def __str__(self):
        return self.title
    @contextmanager
    def get_willow_image(self):
        """Context manager yielding a Willow image for the underlying file.

        Reopens the file if it was closed (fetching a fresh file instance
        for remote storages) and closes it again on exit. IOErrors raised
        while opening are re-raised as SourceImageIOError.
        """
        # Open file if it is closed
        close_file = False
        try:
            image_file = self.file
            if self.file.closed:
                # Reopen the file
                if self.is_stored_locally():
                    self.file.open('rb')
                else:
                    # Some external storage backends don't allow reopening
                    # the file. Get a fresh file instance. #1397
                    storage = self._meta.get_field('file').storage
                    image_file = storage.open(self.file.name, 'rb')
                close_file = True
        except IOError as e:
            # re-throw this as a SourceImageIOError so that calling code can distinguish
            # these from IOErrors elsewhere in the process
            raise SourceImageIOError(text_type(e))
        # Seek to beginning
        image_file.seek(0)
        try:
            yield WillowImage.open(image_file)
        finally:
            if close_file:
                image_file.close()
    def get_rect(self):
        """Return the full image area as a Rect."""
        return Rect(0, 0, self.width, self.height)
    def get_focal_point(self):
        """Return the stored focal point as a Rect, or None if not fully set."""
        if self.focal_point_x is not None and \
                self.focal_point_y is not None and \
                self.focal_point_width is not None and \
                self.focal_point_height is not None:
            return Rect.from_point(
                self.focal_point_x,
                self.focal_point_y,
                self.focal_point_width,
                self.focal_point_height,
            )
    def has_focal_point(self):
        """Return True when a complete focal point is stored."""
        return self.get_focal_point() is not None
    def set_focal_point(self, rect):
        """Store ``rect`` (a Rect, or None to clear) into the focal point fields."""
        if rect is not None:
            self.focal_point_x = rect.centroid_x
            self.focal_point_y = rect.centroid_y
            self.focal_point_width = rect.width
            self.focal_point_height = rect.height
        else:
            self.focal_point_x = None
            self.focal_point_y = None
            self.focal_point_width = None
            self.focal_point_height = None
    def get_suggested_focal_point(self):
        """Suggest a focal point via face detection, falling back to feature
        detection; returns None when nothing is detected."""
        with self.get_willow_image() as willow:
            faces = willow.detect_faces()
            if faces:
                # Create a bounding box around all faces
                left = min(face[0] for face in faces)
                top = min(face[1] for face in faces)
                right = max(face[2] for face in faces)
                bottom = max(face[3] for face in faces)
                focal_point = Rect(left, top, right, bottom)
            else:
                features = willow.detect_features()
                if features:
                    # Create a bounding box around all features
                    left = min(feature[0] for feature in features)
                    top = min(feature[1] for feature in features)
                    right = max(feature[0] for feature in features)
                    bottom = max(feature[1] for feature in features)
                    focal_point = Rect(left, top, right, bottom)
                else:
                    return None
            # Add 20% to width and height and give it a minimum size
            x, y = focal_point.centroid
            width, height = focal_point.size
            width *= 1.20
            height *= 1.20
            width = max(width, 100)
            height = max(height, 100)
            return Rect.from_point(x, y, width, height)
    @classmethod
    def get_rendition_model(cls):
        """ Get the Rendition model for this Image model """
        if django.VERSION >= (1, 9):
            return cls.renditions.rel.related_model
        else:
            return cls.renditions.related.related_model
    def get_rendition(self, filter):
        """Return (creating if necessary) the rendition for ``filter``.

        ``filter`` may be a Filter instance or a spec string such as
        "fill-300x200". Newly generated rendition files are named after the
        source file, the filter spec and (when relevant) the focal point
        cache key.
        """
        if isinstance(filter, string_types):
            filter, created = Filter.objects.get_or_create(spec=filter)
        cache_key = filter.get_cache_key(self)
        Rendition = self.get_rendition_model()
        try:
            rendition = self.renditions.get(
                filter=filter,
                focal_point_key=cache_key,
            )
        except Rendition.DoesNotExist:
            # Generate the rendition image
            generated_image = filter.run(self, BytesIO())
            # Generate filename
            input_filename = os.path.basename(self.file.name)
            input_filename_without_extension, input_extension = os.path.splitext(input_filename)
            # A mapping of image formats to extensions
            FORMAT_EXTENSIONS = {
                'jpeg': '.jpg',
                'png': '.png',
                'gif': '.gif',
            }
            output_extension = filter.spec.replace('|', '.') + FORMAT_EXTENSIONS[generated_image.format_name]
            if cache_key:
                output_extension = cache_key + '.' + output_extension
            # Truncate filename to prevent it going over 60 chars
            output_filename_without_extension = input_filename_without_extension[:(59 - len(output_extension))]
            output_filename = output_filename_without_extension + '.' + output_extension
            rendition, created = self.renditions.get_or_create(
                filter=filter,
                focal_point_key=cache_key,
                defaults={'file': File(generated_image.f, name=output_filename)}
            )
        return rendition
    def is_portrait(self):
        return (self.width < self.height)
    def is_landscape(self):
        return (self.height < self.width)
    @property
    def filename(self):
        return os.path.basename(self.file.name)
    @property
    def default_alt_text(self):
        # by default the alt text field (used in rich text insertion) is populated
        # from the title. Subclasses might provide a separate alt field, and
        # override this
        return self.title
    def is_editable_by_user(self, user):
        """Return True if ``user`` has permission to change this image."""
        from wagtail.wagtailimages.permissions import permission_policy
        return permission_policy.user_has_permission_for_instance(user, 'change', self)
    class Meta:
        abstract = True
class Image(AbstractImage):
    """Concrete image model exposing the default set of admin form fields."""
    admin_form_fields = (
        'title',
        'file',
        'collection',
        'tags',
        'focal_point_x',
        'focal_point_y',
        'focal_point_width',
        'focal_point_height',
    )
# Do smartcropping calculations when user saves an image without a focal point
@receiver(pre_save, sender=Image)
def image_feature_detection(sender, instance, **kwargs):
    """pre_save handler: auto-detect a focal point when the feature is
    enabled via settings and the image has none yet."""
    if getattr(settings, 'WAGTAILIMAGES_FEATURE_DETECTION_ENABLED', False):
        # Make sure the image doesn't already have a focal point
        if not instance.has_focal_point():
            # Set the focal point
            instance.set_focal_point(instance.get_suggested_focal_point())
# Receive the post_delete signal and delete the file associated with the model instance.
@receiver(post_delete, sender=Image)
def image_delete(sender, instance, **kwargs):
    """post_delete handler: remove the underlying file from storage."""
    # Pass false so FileField doesn't save the model.
    instance.file.delete(False)
class Filter(models.Model):
    """
    Represents one or more operations that can be applied to an Image to produce a rendition
    appropriate for final display on the website. Usually this would be a resize operation,
    but could potentially involve colour processing, etc.
    """
    # The spec pattern is operation1-var1-var2|operation2-var1
    spec = models.CharField(max_length=255, unique=True)
    @cached_property
    def operations(self):
        """Parse ``spec`` into a list of registered image operation instances."""
        # Search for operations
        self._search_for_operations()
        # Build list of operation objects
        operations = []
        for op_spec in self.spec.split('|'):
            op_spec_parts = op_spec.split('-')
            if op_spec_parts[0] not in self._registered_operations:
                raise InvalidFilterSpecError("Unrecognised operation: %s" % op_spec_parts[0])
            op_class = self._registered_operations[op_spec_parts[0]]
            operations.append(op_class(*op_spec_parts))
        return operations
    def run(self, image, output):
        """Apply every operation to ``image`` and save the result to ``output``.

        The output format is taken from env['output-format'] if an operation
        set it, otherwise derived from the source format (BMP and unanimated
        GIF are converted to PNG). Returns the saved Willow image.
        """
        with image.get_willow_image() as willow:
            original_format = willow.format_name
            # Fix orientation of image
            willow = willow.auto_orient()
            env = {
                'original-format': original_format,
            }
            for operation in self.operations:
                # Check that the operation can take the "env" argument
                try:
                    inspect.getcallargs(operation.run, willow, image, env)
                    accepts_env = True
                except TypeError:
                    # Check that the paramters fit the old style, so we don't
                    # raise a warning if there is a coding error
                    inspect.getcallargs(operation.run, willow, image)
                    accepts_env = False
                    # NOTE(review): the "%d" placeholder below is never
                    # interpolated — it looks like it was meant to name the
                    # operation class; confirm before relying on this message.
                    warnings.warn("ImageOperation run methods should take 4 "
                                  "arguments. %d.run only takes 3.",
                                  RemovedInWagtail19Warning)
                # Call operation
                if accepts_env:
                    willow = operation.run(willow, image, env) or willow
                else:
                    willow = operation.run(willow, image) or willow
            # Find the output format to use
            if 'output-format' in env:
                # Developer specified an output format
                output_format = env['output-format']
            else:
                # Default to outputting in original format
                output_format = original_format
                # Convert BMP files to PNG
                if original_format == 'bmp':
                    output_format = 'png'
                # Convert unanimated GIFs to PNG as well
                if original_format == 'gif' and not willow.has_animation():
                    output_format = 'png'
            if output_format == 'jpeg':
                # Allow changing of JPEG compression quality
                if 'jpeg-quality' in env:
                    quality = env['jpeg-quality']
                elif hasattr(settings, 'WAGTAILIMAGES_JPEG_QUALITY'):
                    quality = settings.WAGTAILIMAGES_JPEG_QUALITY
                else:
                    quality = 85
                return willow.save_as_jpeg(output, quality=quality, progressive=True, optimize=True)
            elif output_format == 'png':
                return willow.save_as_png(output)
            elif output_format == 'gif':
                return willow.save_as_gif(output)
    def get_cache_key(self, image):
        """Return a short sha1 hash of the image fields the operations vary
        on, or '' when no operation declares vary_fields."""
        vary_parts = []
        for operation in self.operations:
            for field in getattr(operation, 'vary_fields', []):
                value = getattr(image, field, '')
                vary_parts.append(str(value))
        vary_string = '-'.join(vary_parts)
        # Return blank string if there are no vary fields
        if not vary_string:
            return ''
        return hashlib.sha1(vary_string.encode('utf-8')).hexdigest()[:8]
    # Class-level cache mapping operation name -> operation class,
    # populated lazily by _search_for_operations().
    _registered_operations = None
    @classmethod
    def _search_for_operations(cls):
        """Populate the operation registry from hooks (runs at most once)."""
        if cls._registered_operations is not None:
            return
        operations = []
        for fn in hooks.get_hooks('register_image_operations'):
            operations.extend(fn())
        cls._registered_operations = dict(operations)
class AbstractRendition(models.Model):
filter = models.ForeignKey(Filter, related_name='+', null=True, blank=True)
filter_spec = models.CharField(max_length=255, db_index=True, blank=True, default='')
file = models.ImageField(upload_to=get_rendition_upload_to, width_field='width', height_field='height')
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
focal_point_key = models.CharField(max_length=255, blank=True, default='', editable=False)
@property
def url(self):
return self.file.url
@property
def alt(self):
return self.image.title
@property
def attrs(self):
"""
The src, width, height, and alt attributes for an <img> tag, as a HTML
string
"""
return flatatt(self.attrs_dict)
@property
def attrs_dict(self):
"""
A dict of the src, width, height, and alt attributes for an <img> tag.
"""
return OrderedDict([
('src', self.url),
('width', self.width),
('height', self.height),
('alt', self.alt),
])
def img_tag(self, extra_attributes={}):
attrs = self.attrs_dict.copy()
attrs.update(extra_attributes)
return mark_safe('<img{}>'.format(flatatt(attrs)))
def __html__(self):
return self.img_tag()
def get_upload_to(self, filename):
folder_name = 'images'
filename = self.file.field.storage.get_valid_name(filename)
return os.path.join(folder_name, filename)
def save(self, *args, **kwargs):
# populate the `filter_spec` field with the spec string of the filter. In Wagtail 1.8
# Filter will be dropped as a model, and lookups will be done based on this string instead
self.filter_spec = self.filter.spec
return super(AbstractRendition, self).save(*args, **kwargs)
@classmethod
def check(cls, **kwargs):
errors = super(AbstractRendition, cls).check(**kwargs)
# If a filter_spec column exists on this model, and contains null entries, warn that
# a data migration needs to be performed to populate it
try:
null_filter_spec_exists = cls.objects.filter(filter_spec='').exists()
except DatabaseError:
# The database is not in a state where the above lookup makes sense;
# this is entirely expected, because system checks are performed before running
# migrations. We're only interested in the specific case where the column exists
# in the db and contains nulls.
null_filter_spec_exists = False
if null_filter_spec_exists:
errors.append(
checks.Warning(
"Custom image model %r needs a data migration to populate filter_src" % cls,
hint="The database representation of image filters has been changed, and a data "
"migration needs to be put in place before upgrading to Wagtail 1.8, in order to "
"avoid data loss. See http://docs.wagtail.io/en/latest/releases/1.7.html#filter-spec-migration",
obj=cls,
id='wagtailimages.W001',
)
)
return errors
    class Meta:
        # Abstract base model: no DB table is created for this class itself.
        abstract = True
class Rendition(AbstractRendition):
    """Concrete rendition model attached to the default ``Image`` model."""
    image = models.ForeignKey(Image, related_name='renditions', on_delete=models.CASCADE)
    class Meta:
        # At most one rendition per (image, filter, focal point) combination
        unique_together = (
            ('image', 'filter', 'focal_point_key'),
        )
# Receive the post_delete signal and delete the file associated with the model instance.
@receiver(post_delete, sender=Rendition)
def rendition_delete(sender, instance, **kwargs):
    """Remove the rendition's underlying file once its DB row is deleted."""
    # Pass false so FileField doesn't save the model.
    instance.file.delete(False)
| 36.011864 | 116 | 0.63223 |
79db99c54da5ae77ceb2652f3e288ea210df8d70 | 46,335 | py | Python | ocs_ci/ocs/node.py | kesavanvt/ocs-ci | f120044486631f49133c9f3a137842673d765a1c | [
"MIT"
] | null | null | null | ocs_ci/ocs/node.py | kesavanvt/ocs-ci | f120044486631f49133c9f3a137842673d765a1c | [
"MIT"
] | null | null | null | ocs_ci/ocs/node.py | kesavanvt/ocs-ci | f120044486631f49133c9f3a137842673d765a1c | [
"MIT"
] | null | null | null | import copy
import logging
import re
from prettytable import PrettyTable
from collections import defaultdict
from subprocess import TimeoutExpired
from ocs_ci.ocs.machine import get_machine_objs
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import TimeoutExpiredError
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs import constants, exceptions, ocp, defaults
from ocs_ci.utility.utils import TimeoutSampler, convert_device_size
from ocs_ci.ocs import machine
from ocs_ci.ocs.resources import pod
from ocs_ci.utility.utils import set_selinux_permissions
from ocs_ci.ocs.resources.pv import (
get_pv_objs_in_sc,
verify_new_pv_available_in_sc,
delete_released_pvs_in_sc,
)
# Module-wide logger used by all node helpers below.
log = logging.getLogger(__name__)
def get_node_objs(node_names=None):
    """
    Get node objects by node names.

    Args:
        node_names (list): The node names to get their objects for.
            If None, will return all cluster nodes

    Returns:
        list: Cluster node OCP objects

    """
    node_dicts = OCP(kind="node").get()["items"]
    if node_names:
        selected = [
            node_dict
            for node_dict in node_dicts
            if node_dict.get("metadata").get("name") in node_names
        ]
    else:
        selected = node_dicts
    nodes = [OCS(**node_dict) for node_dict in selected]
    assert nodes, "Failed to get the nodes OCS objects"
    return nodes
def get_nodes(node_type=constants.WORKER_MACHINE, num_of_nodes=None):
    """
    Get cluster's nodes according to the node type (e.g. worker, master) and the
    number of requested nodes from that type.

    Args:
        node_type (str): The node type (e.g. worker, master)
        num_of_nodes (int): The number of nodes to be returned. If None,
            all nodes of the requested type are returned.

    Returns:
        list: The nodes OCP instances

    """
    if (
        config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
        and node_type == constants.WORKER_MACHINE
    ):
        # On OpenShift Dedicated, infra nodes also carry the worker role in
        # the ROLES column, so they are filtered out explicitly here.
        typed_nodes = [
            node
            for node in get_node_objs()
            if node_type
            in node.ocp.get_resource(resource_name=node.name, column="ROLES")
            and constants.INFRA_MACHINE
            not in node.ocp.get_resource(resource_name=node.name, column="ROLES")
        ]
    else:
        typed_nodes = [
            node
            for node in get_node_objs()
            if node_type
            in node.ocp.get_resource(resource_name=node.name, column="ROLES")
        ]
    if num_of_nodes:
        # Truncate to the requested number of nodes
        typed_nodes = typed_nodes[:num_of_nodes]
    return typed_nodes
def get_all_nodes():
    """
    Get the names of all nodes in the cluster.

    Returns:
        list: List of node name

    """
    node_items = ocp.OCP(kind=constants.NODE).get().get("items")
    names = []
    for item in node_items:
        names.append(item["metadata"]["name"])
    return names
def wait_for_nodes_status(node_names=None, status=constants.NODE_READY, timeout=180):
    """
    Wait until all nodes are in the given status.

    Args:
        node_names (list): The node names to wait for to reached the desired state
            If None, will wait for all cluster nodes
        status (str): The node status to wait for
            (e.g. 'Ready', 'NotReady', 'SchedulingDisabled')
        timeout (int): The number in seconds to wait for the nodes to reach
            the status

    Raises:
        ResourceWrongStatusException: In case one or more nodes haven't
            reached the desired state

    """
    # Bug fix: bind before the try block so the except handler below never
    # hits a NameError if node discovery itself times out.
    nodes_not_in_state = []
    try:
        if not node_names:
            # Discover all cluster node names; retried briefly as the API
            # may not return them immediately.
            for sample in TimeoutSampler(60, 3, get_node_objs):
                if sample:
                    node_names = [node.name for node in sample]
                    break
        nodes_not_in_state = copy.deepcopy(node_names)
        log.info(f"Waiting for nodes {node_names} to reach status {status}")
        for sample in TimeoutSampler(timeout, 3, get_node_objs, nodes_not_in_state):
            for node in sample:
                if node.ocp.get_resource_status(node.name) == status:
                    log.info(f"Node {node.name} reached status {status}")
                    nodes_not_in_state.remove(node.name)
            if not nodes_not_in_state:
                break
        log.info(f"The following nodes reached status {status}: {node_names}")
    except TimeoutExpiredError:
        log.error(
            f"The following nodes haven't reached status {status}: "
            f"{nodes_not_in_state}"
        )
        raise exceptions.ResourceWrongStatusException(
            node_names, [n.describe() for n in get_node_objs(node_names)]
        )
def unschedule_nodes(node_names):
    """
    Cordon the given nodes so no new pods can be scheduled on them, then wait
    for them to report 'Ready,SchedulingDisabled'.

    Args:
        node_names (list): The names of the nodes

    """
    node_ocp = OCP(kind="node")
    joined_names = " ".join(node_names)
    log.info(f"Unscheduling nodes {joined_names}")
    node_ocp.exec_oc_cmd(f"adm cordon {joined_names}")
    wait_for_nodes_status(node_names, status=constants.NODE_READY_SCHEDULING_DISABLED)
def schedule_nodes(node_names):
    """
    Uncordon the given nodes so pods can be scheduled on them again, then wait
    for them to become Ready.

    Args:
        node_names (list): The names of the nodes

    """
    joined_names = " ".join(node_names)
    OCP(kind="node").exec_oc_cmd(f"adm uncordon {joined_names}")
    log.info(f"Scheduling nodes {joined_names}")
    wait_for_nodes_status(node_names)
def drain_nodes(node_names):
    """
    Drain nodes, evicting all their pods.

    Args:
        node_names (list): The names of the nodes

    Raises:
        TimeoutExpired: in case drain command fails to complete in time

    """
    ocp = OCP(kind="node")
    node_names_str = " ".join(node_names)
    log.info(f"Draining nodes {node_names_str}")
    try:
        # --force evicts unmanaged pods, daemonset pods are skipped, and local
        # (emptyDir) data is discarded; allow up to 30 minutes for eviction
        ocp.exec_oc_cmd(
            f"adm drain {node_names_str} --force=true --ignore-daemonsets "
            f"--delete-local-data",
            timeout=1800,
        )
    except TimeoutExpired:
        # Capture Ceph state for debugging before re-raising
        ct_pod = pod.get_ceph_tools_pod()
        ceph_status = ct_pod.exec_cmd_on_pod("ceph status", out_yaml_format=False)
        log.error(f"Drain command failed to complete. Ceph status: {ceph_status}")
        # TODO: Add re-balance status once pull/1679 is merged
        raise
def get_typed_worker_nodes(os_id="rhcos"):
    """
    Get worker nodes running a specific OS.

    Args:
        os_id (str): OS type like rhcos, RHEL etc...

    Returns:
        list: list of worker nodes instances having specified os

    """
    matching_workers = []
    for worker in get_nodes(node_type="worker"):
        labels = worker.get().get("metadata").get("labels")
        if labels.get("node.openshift.io/os_id") == os_id:
            matching_workers.append(worker)
    return matching_workers
def remove_nodes(nodes):
    """
    Remove the given nodes from the cluster: cordon, drain, then delete them.

    Args:
        nodes (list): list of node instances to remove from cluster

    """
    names = [node.get().get("metadata").get("name") for node in nodes]
    joined_names = " ".join(names)
    # Make the nodes unschedulable first
    unschedule_nodes(names)
    # Evict all the pods from the nodes
    drain_nodes(names)
    # Finally remove the node objects from the cluster
    log.info(f"Deleting nodes {joined_names}")
    OCP(kind="node").exec_oc_cmd(f"delete nodes {joined_names}")
def get_node_ips(node_type="worker"):
    """
    Gets the node public IP.

    Args:
        node_type (str): The node type (e.g. worker, master)

    Returns:
        list: Node IP's

    Raises:
        ValueError: If an unsupported node type is requested
        NotImplementedError: For platforms where IP lookup is not implemented

    """
    ocp = OCP(kind=constants.NODE)
    if node_type == "worker":
        nodes = ocp.get(selector=constants.WORKER_LABEL).get("items")
    elif node_type == "master":
        # Bug fix: this used to compare against the typo "master:", so the
        # master branch could never be taken and 'nodes' stayed undefined.
        nodes = ocp.get(selector=constants.MASTER_LABEL).get("items")
    else:
        raise ValueError(f"Unsupported node type: {node_type}")
    if config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM:
        raise NotImplementedError
    elif config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM:
        return [
            each["address"]
            for node in nodes
            for each in node["status"]["addresses"]
            if each["type"] == "ExternalIP"
        ]
    else:
        raise NotImplementedError
def add_new_node_and_label_it(machineset_name, num_nodes=1, mark_for_ocs_label=True):
    """
    Add a new node for ipi and label it.

    Args:
        machineset_name (str): Name of the machine set
        num_nodes (int): number of nodes to add
        mark_for_ocs_label (bool): True if label the new node

    eg: add_new_node_and_label_it("new-tdesala-zlqzn-worker-us-east-2a")

    Returns:
        list: new spun node names

    """
    # Get the initial nodes list
    initial_nodes = get_worker_nodes()
    log.info(f"Current available worker nodes are {initial_nodes}")
    # get machineset replica count
    machineset_replica_count = machine.get_replica_count(machineset_name)
    log.info(f"{machineset_name} has replica count: {machineset_replica_count}")
    # Increase its replica count
    log.info(f"Increasing the replica count by {num_nodes}")
    machine.add_node(machineset_name, count=machineset_replica_count + num_nodes)
    log.info(
        f"{machineset_name} now has replica "
        f"count: {machineset_replica_count + num_nodes}"
    )
    # wait for the new node to come to ready state
    log.info("Waiting for the new node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)
    # New nodes are whatever appeared in the worker list since we started
    nodes_after_new_spun_node = get_worker_nodes()
    new_spun_nodes = list(set(nodes_after_new_spun_node) - set(initial_nodes))
    log.info(f"New spun nodes: {new_spun_nodes}")
    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind="node")
        for new_spun_node in new_spun_nodes:
            if is_node_labeled(new_spun_node):
                # Consistency fix: use the module-level 'log' logger instead
                # of the root 'logging' module, as the rest of this module does.
                log.info(
                    f"node {new_spun_node} is already labeled with the OCS storage label"
                )
            else:
                node_obj.add_label(
                    resource_name=new_spun_node, label=constants.OPERATOR_NODE_LABEL
                )
                log.info(
                    f"Successfully labeled {new_spun_node} with OCS storage label"
                )
    return new_spun_nodes
def add_new_node_and_label_upi(
    node_type, num_nodes, mark_for_ocs_label=True, node_conf=None
):
    """
    Add a new node for aws/vmware upi platform and label it.

    Args:
        node_type (str): Type of node, RHEL or RHCOS
        num_nodes (int): number of nodes to add
        mark_for_ocs_label (bool): True if label the new node
        node_conf (dict): The node configurations.

    Returns:
        list: new spun node names

    """
    node_conf = node_conf or {}
    initial_nodes = get_worker_nodes()
    from ocs_ci.ocs.platform_nodes import PlatformNodesFactory

    plt = PlatformNodesFactory()
    node_util = plt.get_nodes_platform()
    node_util.create_and_attach_nodes_to_cluster(node_conf, node_type, num_nodes)
    # Wait until the newly created nodes show up in the worker node list
    for sample in TimeoutSampler(timeout=600, sleep=6, func=get_worker_nodes):
        if len(sample) == len(initial_nodes) + num_nodes:
            break
    nodes_after_exp = get_worker_nodes()
    wait_for_nodes_status(node_names=get_worker_nodes(), status=constants.NODE_READY)
    new_spun_nodes = list(set(nodes_after_exp) - set(initial_nodes))
    log.info(f"New spun nodes: {new_spun_nodes}")
    if node_type == constants.RHEL_OS:
        # RHEL workers require SELinux permission setup before hosting OCS
        set_selinux_permissions(workers=new_spun_nodes)
    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind="node")
        for new_spun_node in new_spun_nodes:
            node_obj.add_label(
                resource_name=new_spun_node, label=constants.OPERATOR_NODE_LABEL
            )
            # Consistency fix: use the module-level 'log' logger instead of
            # the root 'logging' module, as the rest of this module does.
            log.info(f"Successfully labeled {new_spun_node} with OCS storage label")
    return new_spun_nodes
def get_node_logs(node_name):
    """
    Get logs from a given node.

    Args:
        node_name (str): Name of the node

    Returns:
        str: Output of 'dmesg' run on node

    """
    node = OCP(kind="node")
    return node.exec_oc_debug_cmd(node_name, ["dmesg"])
def get_node_resource_utilization_from_adm_top(
    nodename=None, node_type=constants.WORKER_MACHINE, print_table=False
):
    """
    Gets the node's cpu and memory utilization in percentage using adm top command.

    Args:
        nodename (str) : The node name. If None, all nodes of the given
            type are checked.
        node_type (str) : The node type (e.g. master, worker)
        print_table (bool) : True to also log the results as a table

    Returns:
        dict : Node name and its cpu and memory utilization in
               percentage

    """
    node_names = (
        [nodename]
        if nodename
        else [node.name for node in get_nodes(node_type=node_type)]
    )
    # Validate node is in Ready state
    wait_for_nodes_status(node_names, status=constants.NODE_READY, timeout=30)
    obj = ocp.OCP()
    resource_utilization_all_nodes = obj.exec_oc_cmd(
        command="adm top nodes", out_yaml_format=False
    ).split("\n")
    utilization_dict = {}
    for node in node_names:
        for value in resource_utilization_all_nodes:
            if node in value:
                # Each matching output line holds two percentages:
                # CPU% first, then memory%
                value = re.findall(r"(\d{1,3})%", value.strip())
                cpu_utilization = value[0]
                log.info(
                    "The CPU utilized by the node " f"{node} is {cpu_utilization}%"
                )
                memory_utilization = value[1]
                log.info(
                    "The memory utilized of the node "
                    f"{node} is {memory_utilization}%"
                )
                utilization_dict[node] = {
                    "cpu": int(cpu_utilization),
                    "memory": int(memory_utilization),
                }
    if print_table:
        print_table_node_resource_utilization(
            utilization_dict=utilization_dict,
            field_names=["Node Name", "CPU USAGE adm_top", "Memory USAGE adm_top"],
        )
    return utilization_dict
def get_node_resource_utilization_from_oc_describe(
    nodename=None, node_type=constants.WORKER_MACHINE, print_table=False
):
    """
    Gets the node's cpu and memory utilization in percentage using oc describe node.

    Args:
        nodename (str) : The node name. If None, all nodes of the given
            type are checked.
        node_type (str) : The node type (e.g. master, worker)
        print_table (bool) : True to also log the results as a table

    Returns:
        dict : Node name and its cpu and memory utilization in
               percentage

    """
    node_names = (
        [nodename]
        if nodename
        else [node.name for node in get_nodes(node_type=node_type)]
    )
    obj = ocp.OCP()
    utilization_dict = {}
    for node in node_names:
        output = obj.exec_oc_cmd(
            command=f"describe node {node}", out_yaml_format=False
        ).split("\n")
        for line in output:
            # The 'cpu '/'memory ' lines carry their percentage in the third
            # non-empty whitespace-separated token
            if "cpu " in line:
                cpu_data = line.split(" ")
                cpu = re.findall(r"\d+", [i for i in cpu_data if i][2])
            if "memory " in line:
                mem_data = line.split(" ")
                mem = re.findall(r"\d+", [i for i in mem_data if i][2])
        utilization_dict[node] = {"cpu": int(cpu[0]), "memory": int(mem[0])}
    if print_table:
        print_table_node_resource_utilization(
            utilization_dict=utilization_dict,
            field_names=[
                "Node Name",
                "CPU USAGE oc_describe",
                "Memory USAGE oc_describe",
            ],
        )
    return utilization_dict
def get_running_pod_count_from_node(nodename=None, node_type=constants.WORKER_MACHINE):
    """
    Get the node's running pod count using 'oc describe node'.

    Args:
        nodename (str) : The node name
        node_type (str) : The node type (e.g. master, worker)

    Returns:
        dict : Node name and its pod_count

    """
    if nodename:
        node_names = [nodename]
    else:
        node_names = [node.name for node in get_nodes(node_type=node_type)]
    obj = ocp.OCP()
    pod_count_dict = {}
    for node in node_names:
        describe_lines = obj.exec_oc_cmd(
            command=f"describe node {node}", out_yaml_format=False
        ).split("\n")
        for line in describe_lines:
            if "Non-terminated Pods: " in line:
                # The third non-empty token of the line carries the count
                tokens = [token for token in line.split(" ") if token]
                counts = re.findall(r"\d+", tokens[2])
                pod_count_dict[node] = int(counts[0])
    return pod_count_dict
def print_table_node_resource_utilization(utilization_dict, field_names):
    """
    Log a pretty-printed table of per-node CPU and memory utilization.

    Args:
        utilization_dict (dict) : CPU and Memory utilization per Node
        field_names (list) : The field names of the table

    """
    table = PrettyTable()
    table.field_names = field_names
    for node_name, usage in utilization_dict.items():
        cpu_cell = f'{usage["cpu"]}%'
        memory_cell = f'{usage["memory"]}%'
        table.add_row([node_name, cpu_cell, memory_cell])
    log.info(f"\n{table}\n")
def node_network_failure(node_names, wait=True):
    """
    Induce node network failure
    Bring node network interface down, making the node unresponsive

    Args:
        node_names (list): The names of the nodes
        wait (bool): True in case wait for status is needed, False otherwise

    Returns:
        bool: True if node network fail is successful

    """
    if not isinstance(node_names, list):
        node_names = [node_names]
    ocp = OCP(kind="node")
    # Bring down the interface that carries the node's default route
    fail_nw_cmd = "ifconfig $(route | grep default | awk '{print $(NF)}') down"
    for node_name in node_names:
        try:
            ocp.exec_oc_debug_cmd(node=node_name, cmd_list=[fail_nw_cmd], timeout=15)
        except TimeoutExpired:
            # Deliberately ignored: once the interface drops, the debug
            # session cannot return, so the short timeout is expected to fire
            pass
    if wait:
        wait_for_nodes_status(node_names=node_names, status=constants.NODE_NOT_READY)
    return True
def get_osd_running_nodes():
    """
    Get the names of the nodes that run OSD pods.

    Returns:
        list: OSD node names

    """
    node_names = []
    for osd_pod in pod.get_osd_pods():
        node_names.append(pod.get_pod_node(osd_pod).name)
    return node_names
def get_osds_per_node():
    """
    Map each node name to the OSD pod names running on it.

    Returns:
        dict: {"Node name":["osd running pod name running on the node",..,]}

    """
    dic_node_osd = defaultdict(list)
    for osd_pod in pod.get_osd_pods():
        node_name = pod.get_pod_node(osd_pod).name
        dic_node_osd[node_name].append(osd_pod.name)
    return dic_node_osd
def get_app_pod_running_nodes(pod_obj):
    """
    Get the names of the nodes the given app pods run on.

    Args:
        pod_obj (list): List of app pod objects

    Returns:
        list: App pod running node names

    """
    node_names = []
    for app_pod in pod_obj:
        node_names.append(pod.get_pod_node(app_pod).name)
    return node_names
def get_both_osd_and_app_pod_running_node(osd_running_nodes, app_pod_running_nodes):
    """
    Get the node names that host both an OSD and an app pod.

    Args:
        osd_running_nodes (list): List of osd running node names
        app_pod_running_nodes (list): List of app pod running node names

    Returns:
        list: Both OSD and app pod running node names

    """
    common_nodes = list(set(osd_running_nodes).intersection(app_pod_running_nodes))
    log.info(f"Common node is {common_nodes}")
    return common_nodes
def get_node_from_machine_name(machine_name):
    """
    Get node name from a given machine_name.

    Args:
        machine_name (str): Name of Machine

    Returns:
        str: Name of Node (or None if not found)

    """
    machine_objs = get_machine_objs()
    for machine_obj in machine_objs:
        if machine_obj.name == machine_name:
            # The machine's nodeRef points at the node it backs
            machine_dict = machine_obj.get()
            node_name = machine_dict["status"]["nodeRef"]["name"]
            return node_name
    # Implicitly returns None when no machine matches the given name
def get_provider():
    """
    Return the OCP Provider (Platform).

    Returns:
        str: The Provider that the OCP is running on

    """
    ocp_cluster = OCP(kind="", resource_name="nodes")
    node_spec = ocp_cluster.get("nodes")["items"][0]["spec"]
    # Nodes on cloud platforms expose a providerID like 'aws:///...';
    # bare-metal nodes have no providerID at all
    if "providerID" not in node_spec:
        return "BareMetal"
    return node_spec["providerID"].split(":")[0]
def get_compute_node_names(no_replace=False):
    """
    Gets the compute node names.

    Args:
        no_replace (bool): If False '.' will replaced with '-'

    Returns:
        list: List of compute node names

    Raises:
        NotImplementedError: For platforms not handled below

    """
    platform = config.ENV_DATA.get("platform").lower()
    compute_node_objs = get_nodes()
    if platform in [constants.VSPHERE_PLATFORM, constants.AWS_PLATFORM]:
        return [
            compute_obj.get()["metadata"]["labels"][constants.HOSTNAME_LABEL]
            for compute_obj in compute_node_objs
        ]
    elif platform in [
        constants.BAREMETAL_PLATFORM,
        constants.BAREMETALPSI_PLATFORM,
        constants.IBM_POWER_PLATFORM,
    ]:
        if no_replace:
            return [
                compute_obj.get()["metadata"]["labels"][constants.HOSTNAME_LABEL]
                for compute_obj in compute_node_objs
            ]
        else:
            # On bare-metal platforms the hostname label may contain dots;
            # they are replaced with dashes for consumers of these names
            return [
                compute_obj.get()["metadata"]["labels"][
                    constants.HOSTNAME_LABEL
                ].replace(".", "-")
                for compute_obj in compute_node_objs
            ]
    else:
        raise NotImplementedError
def get_ocs_nodes(num_of_nodes=None):
    """
    Get the OCS-labeled nodes.

    Args:
        num_of_nodes (int): The number of ocs nodes to return. If not specified,
            it returns all the ocs nodes.

    Returns:
        list: List of ocs nodes

    """
    labeled_names = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    ocs_nodes = get_node_objs(labeled_names)
    num_of_nodes = num_of_nodes or len(ocs_nodes)
    return ocs_nodes[:num_of_nodes]
def get_node_name(node_obj):
    """
    Get oc node's name.

    Args:
        node_obj (node_obj): oc node object

    Returns:
        str: node's name

    """
    # NOTE(review): in OCP responses 'items' is normally a list, yet the code
    # below indexes it with ["metadata"] as if it were a single mapping —
    # confirm the expected shape of node_obj with this function's callers.
    node_items = node_obj.get("items")
    return node_items["metadata"]["name"]
def check_nodes_specs(min_memory, min_cpu):
    """
    Check that the cluster worker nodes meet the required minimum CPU and memory.

    Args:
        min_memory (int): The required minimum memory in bytes
        min_cpu (int): The required minimum number of vCPUs

    Returns:
        bool: True if all nodes meet the required minimum specs, False otherwise

    """
    nodes = get_nodes()
    log.info(
        f"Checking following nodes with worker selector (assuming that "
        f"this is ran in CI and there are no worker nodes without OCS):\n"
        f"{[node.get().get('metadata').get('name') for node in nodes]}"
    )
    for node in nodes:
        real_cpu = int(node.get()["status"]["capacity"]["cpu"])
        # Capacity memory is a Kubernetes quantity string; normalize to bytes
        real_memory = convert_device_size(
            node.get()["status"]["capacity"]["memory"], "B"
        )
        if real_cpu < min_cpu or real_memory < min_memory:
            log.warning(
                f"Node {node.get().get('metadata').get('name')} specs don't meet "
                f" the minimum required specs.\n The requirements are: "
                f"{min_cpu} CPUs and {min_memory} Memory\nThe node has: {real_cpu} "
                f"CPUs and {real_memory} Memory"
            )
            return False
    log.info(
        f"Cluster worker nodes meet the minimum requirements of "
        f"{min_cpu} CPUs and {min_memory} Memory"
    )
    return True
def delete_and_create_osd_node_ipi(osd_node_name):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for any IPI platform.

    Args:
        osd_node_name (str): the name of the osd node

    Returns:
        str: The new node name

    """
    log.info("Going to unschedule, drain and delete %s node", osd_node_name)
    # Unscheduling node
    unschedule_nodes([osd_node_name])
    # Draining Node
    drain_nodes([osd_node_name])
    log.info("Getting machine name from specified node name")
    machine_name = machine.get_machine_from_node_name(osd_node_name)
    log.info(f"Node {osd_node_name} associated machine is {machine_name}")
    log.info(f"Deleting machine {machine_name} and waiting for new machine to come up")
    machine.delete_machine_and_check_state_of_new_spinned_machine(machine_name)
    new_machine_list = machine.get_machines()
    # Find the new machine in the same machineset as the deleted one.
    # Trimming the last 6 chars (the random machine suffix) leaves the
    # machineset prefix, e.g.
    # prsurve-40-ocs-43-kbrvf-worker-us-east-2b-nlgkr ->
    # prsurve-40-ocs-43-kbrvf-worker-us-east-2b
    new_machine_name = None
    for machines in new_machine_list:
        if re.match(machines.name[:-6], machine_name):
            new_machine_name = machines.name
    # Robustness fix: fail with a clear message instead of the NameError that
    # previously occurred when no machine matched the machineset prefix
    assert new_machine_name, f"Failed to find a machine matching {machine_name}"
    machineset_name = machine.get_machineset_from_machine_name(new_machine_name)
    log.info("Waiting for new worker node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)
    new_node_name = get_node_from_machine_name(new_machine_name)
    log.info("Adding ocs label to newly created worker node")
    node_obj = ocp.OCP(kind="node")
    node_obj.add_label(resource_name=new_node_name, label=constants.OPERATOR_NODE_LABEL)
    # Consistency fix: use the module-level 'log' logger instead of the
    # root 'logging' module
    log.info(f"Successfully labeled {new_node_name} with OCS storage label")
    return new_node_name
def delete_and_create_osd_node_aws_upi(osd_node_name):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for AWS UPI.

    Args:
        osd_node_name (str): the name of the osd node

    Returns:
        str: The new node name

    """
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    # Remember the AZ and the CloudFormation stack before deletion, so the
    # replacement node can be created in the same place
    az = get_node_az(osd_node)
    from ocs_ci.ocs.platform_nodes import AWSNodes
    aws_nodes = AWSNodes()
    stack_name_of_deleted_node = aws_nodes.get_stack_name_of_node(osd_node_name)
    remove_nodes([osd_node])
    log.info(f"name of deleted node = {osd_node_name}")
    log.info(f"availability zone of deleted node = {az}")
    log.info(f"stack name of deleted node = {stack_name_of_deleted_node}")
    if config.ENV_DATA.get("rhel_workers"):
        node_type = constants.RHEL_OS
    else:
        node_type = constants.RHCOS
    log.info("Preparing to create a new node...")
    node_conf = {"stack_name": stack_name_of_deleted_node}
    new_node_names = add_new_node_and_label_upi(node_type, 1, node_conf=node_conf)
    return new_node_names[0]
def get_node_az(node):
    """
    Get the node availability zone.

    Args:
        node (ocs_ci.ocs.resources.ocs.OCS): The node object

    Returns:
        str: The name of the node availability zone

    """
    metadata = node.get().get("metadata", {})
    return metadata.get("labels", {}).get("topology.kubernetes.io/zone")
def delete_and_create_osd_node_vsphere_upi(osd_node_name, use_existing_node=False):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for vSphere UPI.

    Args:
        osd_node_name (str): the name of the osd node
        use_existing_node (bool): If False, create a new node and label it.
            If True, use an existing node to replace the deleted node
            and label it.

    Returns:
        str: The new node name

    """
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    remove_nodes([osd_node])
    log.info(f"name of deleted node = {osd_node_name}")
    if config.ENV_DATA.get("rhel_workers"):
        node_type = constants.RHEL_OS
    else:
        node_type = constants.RHCOS
    if not use_existing_node:
        log.info("Preparing to create a new node...")
        new_node_names = add_new_node_and_label_upi(node_type, 1)
        new_node_name = new_node_names[0]
    else:
        # Reuse a worker that is not yet OCS-labeled instead of creating one
        node_not_in_ocs = get_worker_nodes_not_in_ocs()[0]
        log.info(
            f"Preparing to replace the node {osd_node_name} "
            f"with an existing node {node_not_in_ocs.name}"
        )
        if node_type == constants.RHEL_OS:
            set_selinux_permissions(workers=[node_not_in_ocs])
        label_nodes([node_not_in_ocs])
        new_node_name = node_not_in_ocs.name
    return new_node_name
def delete_and_create_osd_node_vsphere_upi_lso(osd_node_name, use_existing_node=False):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for vSphere UPI with LSO.

    Args:
        osd_node_name (str): the name of the osd node
        use_existing_node (bool): If False, create a new node and label it.
            If True, use an existing node to replace the deleted node
            and label it.

    Returns:
        str: The new node name

    """
    from ocs_ci.ocs.platform_nodes import PlatformNodesFactory
    from ocs_ci.ocs.resources.storage_cluster import get_osd_size

    sc_name = constants.LOCAL_BLOCK_RESOURCE
    old_pv_objs = get_pv_objs_in_sc(sc_name)
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    osd_pod = get_node_pods(osd_node_name, pods_to_search=pod.get_osd_pods())[0]
    osd_id = pod.get_osd_pod_id(osd_pod)
    log.info(f"osd id to remove = {osd_id}")
    # Save the node hostname before deleting the node
    osd_node_hostname_label = get_node_hostname_label(osd_node)
    log.info("Scale down node deployments...")
    scale_down_deployments(osd_node_name)
    log.info("Scale down deployments finished successfully")
    new_node_name = delete_and_create_osd_node_vsphere_upi(
        osd_node_name, use_existing_node
    )
    assert new_node_name, "Failed to create a new node"
    log.info(f"New node created successfully. Node name: {new_node_name}")
    # If we use LSO, we need to create and attach a new disk manually
    new_node = get_node_objs(node_names=[new_node_name])[0]
    plt = PlatformNodesFactory()
    node_util = plt.get_nodes_platform()
    osd_size = get_osd_size()
    log.info(
        f"Create a new disk with size {osd_size}, and attach to node {new_node_name}"
    )
    node_util.create_and_attach_volume(node=new_node, size=osd_size)
    new_node_hostname_label = get_node_hostname_label(new_node)
    log.info(
        "Replace the old node with the new worker node in localVolumeDiscovery and localVolumeSet"
    )
    res = add_new_node_to_lvd_and_lvs(
        old_node_name=osd_node_hostname_label,
        new_node_name=new_node_hostname_label,
    )
    assert res, "Failed to add the new node to LVD and LVS"
    log.info("Verify new pv is available...")
    is_new_pv_available = verify_new_pv_available_in_sc(old_pv_objs, sc_name)
    assert is_new_pv_available, "New pv is not available"
    log.info("Finished verifying that the new pv is available")
    osd_removal_job = pod.run_osd_removal_job(osd_id)
    assert osd_removal_job, "ocs-osd-removal failed to create"
    # Bug fix: the result used to be wrapped in a one-element tuple (stray
    # trailing comma), which is always truthy, so the assert below could
    # never fail even when the removal job did not complete.
    is_completed = pod.verify_osd_removal_job_completed_successfully(osd_id)
    assert is_completed, "ocs-osd-removal-job is not in status 'completed'"
    log.info("ocs-osd-removal-job completed successfully")
    expected_num_of_deleted_pvs = 1
    num_of_deleted_pvs = delete_released_pvs_in_sc(sc_name)
    assert (
        num_of_deleted_pvs == expected_num_of_deleted_pvs
    ), f"num of deleted PVs is {num_of_deleted_pvs} instead of {expected_num_of_deleted_pvs}"
    log.info("Successfully deleted old pv")
    is_deleted = pod.delete_osd_removal_job(osd_id)
    assert is_deleted, "Failed to delete ocs-osd-removal-job"
    log.info("ocs-osd-removal-job deleted successfully")
    return new_node_name
def label_nodes(nodes, label=constants.OPERATOR_NODE_LABEL):
    """
    Label nodes.

    Args:
        nodes (list): list of node objects need to label
        label (str): New label to be assigned for these nodes.
            Default value is the OCS label

    """
    node_obj = ocp.OCP(kind="node")
    for new_node_to_label in nodes:
        node_obj.add_label(resource_name=new_node_to_label.name, label=label)
        # Consistency fix: use the module-level 'log' logger instead of the
        # root 'logging' module, as the rest of this module does.
        log.info(
            f"Successfully labeled {new_node_to_label.name} with OCS storage label"
        )
def get_master_nodes():
    """
    Fetch the names of all master nodes.

    Returns:
        list: List of names of master nodes

    """
    selector = "node-role.kubernetes.io/master"
    node_items = ocp.OCP(kind=constants.NODE).get(selector=selector).get("items")
    return [item.get("metadata").get("name") for item in node_items]
def get_worker_nodes():
    """
    Fetch the names of all worker nodes.

    Returns:
        list: List of names of worker nodes

    """
    selector = "node-role.kubernetes.io/worker"
    ocp_node_obj = ocp.OCP(kind=constants.NODE)
    nodes = ocp_node_obj.get(selector=selector).get("items")
    # Eliminate infra nodes from worker nodes in case of openshift dedicated
    if config.ENV_DATA["platform"].lower() == "openshiftdedicated":
        infra_nodes = ocp_node_obj.get(selector=constants.INFRA_NODE_LABEL).get("items")
        infra_node_ids = [
            infra_node.get("metadata").get("name") for infra_node in infra_nodes
        ]
        nodes = [
            node
            for node in nodes
            if node.get("metadata").get("name") not in infra_node_ids
        ]
    return [node.get("metadata").get("name") for node in nodes]
def get_worker_nodes_not_in_ocs():
    """
    Get the worker nodes that are not ocs labeled.

    Returns:
          list: list of worker node objects that are not ocs labeled

    """
    ocs_node_names = {ocs_node.name for ocs_node in get_ocs_nodes()}
    return [
        worker
        for worker in get_nodes(constants.WORKER_MACHINE)
        if worker.name not in ocs_node_names
    ]
def node_replacement_verification_steps_user_side(
    old_node_name, new_node_name, new_osd_node_name, old_osd_id
):
    """
    Check the verification steps that the user should perform after the process
    of node replacement as described in the docs.

    Args:
        old_node_name (str): The name of the old node that has been deleted
        new_node_name (str): The name of the new node that has been created
        new_osd_node_name (str): The name of the new node that has been added to osd nodes
        old_osd_id (str): The old osd id

    Returns:
        bool: True if all the verification steps passed. False otherwise

    """
    # The new node must be OCS-labeled and the old one gone
    ocs_nodes = get_ocs_nodes()
    ocs_node_names = [n.name for n in ocs_nodes]
    if new_node_name not in ocs_node_names:
        log.warning("The new node not found in ocs nodes")
        return False
    if old_node_name in ocs_node_names:
        log.warning("The old node name found in ocs nodes")
        return False
    csi_cephfsplugin_pods = pod.get_plugin_pods(interface=constants.CEPHFILESYSTEM)
    csi_rbdplugin_pods = pod.get_plugin_pods(interface=constants.CEPHBLOCKPOOL)
    csi_plugin_pods = csi_cephfsplugin_pods + csi_rbdplugin_pods
    if not all([p.status() == constants.STATUS_RUNNING for p in csi_plugin_pods]):
        log.warning("Not all csi rbd and cephfs plugin pods in status running")
        return False
    # It can take some time until all the ocs pods are up and running
    # after the process of node replacement
    if not pod.wait_for_pods_to_be_running():
        log.warning("Not all the pods in running state")
        return False
    new_osd_pod = get_node_pods(new_osd_node_name, pods_to_search=pod.get_osd_pods())[0]
    if not new_osd_pod:
        log.warning("Didn't find any osd pods running on the new node")
        return False
    # The replacement osd pod is expected to reuse the removed osd's id
    new_osd_id = pod.get_osd_pod_id(new_osd_pod)
    if old_osd_id != new_osd_id:
        log.warning(
            f"The osd pod, that associated to the new node, has the id {new_osd_id} "
            f"instead of the expected osd id {old_osd_id}"
        )
        return False
    log.info("Verification steps from the user side finish successfully")
    return True
def node_replacement_verification_steps_ceph_side(
    old_node_name, new_node_name, new_osd_node_name
):
    """
    Check the verification steps from the Ceph side, after the process
    of node replacement as described in the docs

    Args:
        old_node_name (str): The name of the old node that has been deleted
        new_node_name (str): The name of the new node that has been created
        new_osd_node_name (str): The name of the new node that has been added to osd nodes

    Returns:
        bool: True if all the verification steps passed. False otherwise

    """
    if old_node_name == new_node_name:
        log.warning("Hostname didn't change")
        return False
    wait_for_nodes_status([new_node_name, new_osd_node_name])
    # It can take some time until all the ocs pods are up and running
    # after the process of node replacement
    if not pod.wait_for_pods_to_be_running():
        log.warning("Not all the pods in running state")
        return False
    # 'ceph osd status' must reference the new osd node and no longer the old one
    ct_pod = pod.get_ceph_tools_pod()
    ceph_osd_status = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd status")
    if new_osd_node_name not in ceph_osd_status:
        log.warning("new osd node name not found in 'ceph osd status' output")
        return False
    if old_node_name in ceph_osd_status:
        log.warning("old node name found in 'ceph osd status' output")
        return False
    # Same check against the osd-running node names resolved from the pods
    osd_node_names = get_osd_running_nodes()
    if new_osd_node_name not in osd_node_names:
        log.warning("the new osd hostname not found in osd node names")
        return False
    if old_node_name in osd_node_names:
        log.warning("the old hostname found in osd node names")
        return False
    # local import — presumably to avoid a circular import between the node
    # and cluster modules; confirm before moving it to the top of the file
    from ocs_ci.ocs.cluster import check_ceph_osd_tree_after_node_replacement
    if not check_ceph_osd_tree_after_node_replacement():
        return False
    log.info("Verification steps from the ceph side finish successfully")
    return True
def is_node_labeled(node_name, label=constants.OPERATOR_NODE_LABEL):
    """
    Check if the node is labeled with a specified label.

    Args:
        node_name (str): The node name to check if it has the specific label
        label (str): The name of the label. Default value is the OCS label.

    Returns:
        bool: True if the node is labeled with the specified label. False otherwise

    """
    labeled_node_names = machine.get_labeled_nodes(label=label)
    return node_name in labeled_node_names
def taint_nodes(nodes, taint_label=constants.OCS_TAINT):
    """
    Taint nodes

    Args:
        nodes (list): list of node names need to taint
        taint_label (str): New taint label to be assigned for these nodes.
            Default value is the OCS taint

    """
    ocp_obj = ocp.OCP()
    for node in nodes:
        command = f"adm taint node {node} {taint_label}"
        try:
            ocp_obj.exec_oc_cmd(command)
            # use the module logger for consistency with the rest of the file,
            # and report the taint actually applied (it may not be the OCS one)
            log.info(f"Successfully tainted {node} with taint {taint_label}")
        except Exception as e:
            # best-effort: 'oc adm taint' fails for an already-tainted node;
            # log and continue with the remaining nodes
            log.info(f"{node} was not tainted - {e}")
def check_taint_on_ocs_nodes(taint=constants.OPERATOR_NODE_TAINT):
    """
    Function to check for particular taint on nodes

    Args:
        taint (str): The taint to check on nodes

    Return:
        bool: True if taint is present on at least one OCS node. False otherwise

    """
    # Fixes the old flag-based logic which returned True for an empty node
    # list (flag stayed -1 -> bool(-1) is True), looked only at taints[0],
    # and let the last tainted node override earlier matches.
    for node_obj in get_ocs_nodes():
        taints = node_obj.get().get("spec").get("taints") or []
        # inspect every taint on the node, not only the first one
        if any(taint in t.get("key", "") for t in taints):
            log.info(f"Node {node_obj.name} has taint {taint}")
            return True
    return False
def taint_ocs_nodes(nodes_to_taint=None):
    """
    Taint nodes with "node.ocs.openshift.io/storage=true:NoSchedule"

    Args:
        nodes_to_taint (list): Nodes to taint

    """
    if check_taint_on_ocs_nodes():
        # at least one node already carries the taint — nothing to do
        log.info(
            f"One or more nodes already have taint {constants.OPERATOR_NODE_TAINT} "
        )
        return
    ocp_obj = OCP()
    ocs_nodes = get_ocs_nodes()
    targets = nodes_to_taint if nodes_to_taint else ocs_nodes
    log.info(f"Taint nodes with taint: {constants.OPERATOR_NODE_TAINT}")
    for node in targets:
        ocp_obj.exec_oc_cmd(
            command=f"adm taint nodes {node.name} {constants.OPERATOR_NODE_TAINT}"
        )
def untaint_ocs_nodes(taint=constants.OPERATOR_NODE_TAINT, nodes_to_untaint=None):
    """
    Function to remove taints from nodes

    Args:
        taint (str): taint to use
        nodes_to_untaint (list): list of nodes to untaint

    Return:
        bool: True if untainted, false otherwise

    """
    if not check_taint_on_ocs_nodes():
        return False
    ocp_obj = OCP()
    ocs_nodes = get_ocs_nodes()
    targets = nodes_to_untaint if nodes_to_untaint else ocs_nodes
    for node in targets:
        # a trailing '-' on the taint removes it from the node
        ocp_obj.exec_oc_cmd(command=f"adm taint nodes {node.name} {taint}-")
        log.info(f"Untainted {node.name}")
    return True
def get_node_pods(node_name, pods_to_search=None):
    """
    Get all the pods of a specified node

    Args:
        node_name (str): The node name to get the pods
        pods_to_search (list): list of pods to search for the node pods.
            If not specified, will search in all the pods.

    Returns:
        list: list of all the pods of the specified node

    """
    candidates = pods_to_search or pod.get_all_pods()
    return [
        candidate
        for candidate in candidates
        if pod.get_pod_node(candidate).name == node_name
    ]
def get_node_pods_to_scale_down(node_name):
    """
    Get the pods of a node to scale down as described in the documents
    of node replacement with LSO

    Args:
        node_name (str): The node name

    Returns:
        list: The node's pods to scale down

    """
    # mon, osd and mgr deployments all need to be scaled down on the old node
    candidate_pods = pod.get_mon_pods() + pod.get_osd_pods() + pod.get_mgr_pods()
    return get_node_pods(node_name, candidate_pods)
def scale_down_deployments(node_name):
    """
    Scale down the deployments of a node as described in the documents
    of node replacement with LSO

    Args:
        node_name (str): The node name

    """
    # renamed local from 'ocp' to avoid shadowing the imported ocp module
    ocp_obj = OCP(kind="node", namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    pods_to_scale_down = get_node_pods_to_scale_down(node_name)
    for p in pods_to_scale_down:
        deployment_name = pod.get_deployment_name(p.name)
        # typo fix: message previously read "Scale down deploymet ..."
        log.info(f"Scale down deployment {deployment_name}")
        ocp_obj.exec_oc_cmd(f"scale deployment {deployment_name} --replicas=0")
    log.info("Scale down rook-ceph-crashcollector")
    ocp_obj.exec_oc_cmd(
        f"scale deployment --selector=app=rook-ceph-crashcollector,"
        f"node_name='{node_name}' --replicas=0"
    )
def get_node_index_in_local_block(node_name):
    """
    Get the node index in the node values as it appears in the local block resource

    Args:
        node_name (str): The node name to search for his index

    Returns:
        int: The node index in the nodeSelector values

    """
    ocp_lvs_obj = OCP(
        kind=constants.LOCAL_VOLUME_SET,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
        resource_name=constants.LOCAL_BLOCK_RESOURCE,
    )
    node_selector = ocp_lvs_obj.get().get("spec").get("nodeSelector")
    # the node names live in the first matchExpressions entry of the first term
    match_expression = node_selector.get("nodeSelectorTerms")[0].get(
        "matchExpressions"
    )[0]
    return match_expression.get("values").index(node_name)
def add_new_node_to_lvd_and_lvs(old_node_name, new_node_name):
    """
    Replace the old node with the new node in localVolumeDiscovery and localVolumeSet,
    as described in the documents of node replacement with LSO

    Args:
        old_node_name (str): The old node name to remove from the local volume
        new_node_name (str): the new node name to add to the local volume

    Returns:
        bool: True in case if changes are applied. False otherwise

    """
    old_node_index = get_node_index_in_local_block(old_node_name)
    # JSON-Patch path pointing at the old node's entry in the nodeSelector values
    path_to_old_node = f"/spec/nodeSelector/nodeSelectorTerms/0/matchExpressions/0/values/{old_node_index}"
    params = f"""[{{"op": "replace", "path": "{path_to_old_node}", "value": "{new_node_name}"}}]"""
    ocp_lvd_obj = OCP(
        kind=constants.LOCAL_VOLUME_DISCOVERY,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
    )
    ocp_lvs_obj = OCP(
        kind=constants.LOCAL_VOLUME_SET,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
        resource_name=constants.LOCAL_BLOCK_RESOURCE,
    )
    # apply the same replace patch to both LSO resources
    lvd_result = ocp_lvd_obj.patch(params=params, format_type="json")
    lvs_result = ocp_lvs_obj.patch(params=params, format_type="json")
    return lvd_result and lvs_result
def get_node_hostname_label(node_obj):
    """
    Get the hostname label of a node

    Args:
        node_obj (ocs_ci.ocs.resources.ocs.OCS): The node object

    Returns:
        str: The node's hostname label

    """
    labels = node_obj.get().get("metadata").get("labels")
    return labels.get(constants.HOSTNAME_LABEL)
def wait_for_new_osd_node(old_osd_node_names, timeout=180):
    """
    Wait for the new osd node to appear.

    Args:
        old_osd_node_names (list): List of the old osd node names
        timeout (int): time to wait for the new osd node to appear

    Returns:
        str: The new osd node name if the new osd node appear in the specific timeout.
            Else it returns None

    """
    try:
        # poll the current osd-running nodes every 10s until a name appears
        # that was not in the old set
        for current_osd_node_names in TimeoutSampler(
            timeout=timeout, sleep=10, func=get_osd_running_nodes
        ):
            new_osd_node_names = [
                node_name
                for node_name in current_osd_node_names
                if node_name not in old_osd_node_names
            ]
            if new_osd_node_names:
                log.info(f"New osd node is {new_osd_node_names[0]}")
                return new_osd_node_names[0]
    except TimeoutExpiredError:
        log.warning(f"New osd node didn't appear after {timeout} seconds")
        return None
| 31.584867 | 107 | 0.659933 |
35b77b874a9b3869cd680ad8843b32e1cda90085 | 2,248 | py | Python | basis_modules/modules/alphavantage/importers/base.py | kvh/snapflow-modules | 6123597f4b71a8e890b8ba7df471c7efbd59d6a4 | [
"BSD-3-Clause"
] | null | null | null | basis_modules/modules/alphavantage/importers/base.py | kvh/snapflow-modules | 6123597f4b71a8e890b8ba7df471c7efbd59d6a4 | [
"BSD-3-Clause"
] | 2 | 2021-07-26T17:46:22.000Z | 2021-08-02T19:40:02.000Z | basis_modules/modules/alphavantage/importers/base.py | kvh/snapflow-modules | 6123597f4b71a8e890b8ba7df471c7efbd59d6a4 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import json
import time
from dataclasses import dataclass, field
from datetime import date, datetime, timedelta
from typing import TYPE_CHECKING, Dict, Iterator, List, Optional
from basis import Context, DataBlock, Reference, datafunction
from basis.core.extraction.connection import JsonHttpApiConnection
from dcp.data_format.formats.memory.records import Records
from dcp.utils.common import (
ensure_date,
ensure_datetime,
ensure_utc,
title_to_snake_case,
utcnow,
)
from dcp.utils.data import read_csv
if TYPE_CHECKING:
from basis_stocks import (
Ticker,
AlphavantageEodPrice,
AlphavantageCompanyOverview,
)
ALPHAVANTAGE_API_BASE_URL = "https://www.alphavantage.co/query"
MIN_DATE = date(2000, 1, 1)
MIN_DATETIME = datetime(2000, 1, 1)
def prepare_tickers(
    tickers_list: Optional[List] = None,
    tickers_input: Optional[DataBlock[Ticker]] = None,
) -> Optional[List[str]]:
    """Normalize ticker symbols from either an upstream block or a literal list.

    The upstream data block takes precedence; a plain string is treated as a
    JSON-encoded list.
    """
    if tickers_input is not None:
        return list(tickers_input.as_dataframe()["symbol"])
    if isinstance(tickers_list, str):
        tickers_list = json.loads(tickers_list)
    return tickers_list or []
def prepare_params_for_ticker(
    ticker: str, ticker_latest_dates_imported: Dict[str, datetime]
) -> Dict:
    """Build the Alphavantage daily-series query params for one ticker."""
    last_imported = ensure_datetime(
        ticker_latest_dates_imported.get(ticker, MIN_DATETIME)
    )
    hundred_days_ago = utcnow() - timedelta(days=100)
    # "full" history when more than ~100 days behind; otherwise the cheaper
    # "compact" response (last 100 data points) suffices
    outputsize = "full" if ensure_utc(last_imported) <= hundred_days_ago else "compact"
    return {
        "symbol": ticker,
        "outputsize": outputsize,
        "datatype": "csv",
        "function": "TIME_SERIES_DAILY_ADJUSTED",
    }
def is_alphavantage_error(record: Dict) -> bool:
    """Heuristically detect an Alphavantage error payload."""
    haystack = str(record).lower()
    return any(marker in haystack for marker in ("error message", "invalid api call"))
def is_alphavantage_rate_limit(record: Dict) -> bool:
    """Detect Alphavantage's rate-limit notice payload."""
    haystack = str(record).lower()
    return "calls per minute" in haystack or "api call volume" in haystack
| 28.1 | 76 | 0.69395 |
36eac3c013d53ac88e94ed0664690054cb94fd2d | 11,867 | py | Python | scripts/themecreator/tools.py | AgnirudraSil/tetris | 2a4f4c26190fc8b669f98c116af343f7f1ac51bf | [
"MIT"
] | 3 | 2022-01-11T06:11:08.000Z | 2022-03-10T09:34:42.000Z | scripts/themecreator/tools.py | agnirudrasil/tetris | 2a4f4c26190fc8b669f98c116af343f7f1ac51bf | [
"MIT"
] | null | null | null | scripts/themecreator/tools.py | agnirudrasil/tetris | 2a4f4c26190fc8b669f98c116af343f7f1ac51bf | [
"MIT"
] | null | null | null | import numpy
import pygame
import pyautogui
from random import randint as rint
from scripts import openfile
from PIL import Image
from PIL import ImageDraw
class Reset:
    """Toolbar button that resets the canvas view back to its origin."""
    def __init__(self):
        self.surf = pygame.image.load(openfile("assets/icons/reset.png")).convert_alpha()
        self.rect = self.surf.get_rect(top=6 * 32)  # 7th slot in the 32px toolbar
        self.select = False
    def draw(self, surface):
        # Blit the toolbar icon.
        surface.blit(self.surf, self.rect)
    def selected(self, pos, canvas, *tools):
        # On click: deselect every other tool and return the default canvas
        # origin [72, 40]; otherwise return the canvas argument unchanged.
        if self.rect.collidepoint(pos) and self.select is False:
            self.select = False
            for tool in tools:
                tool.select = False
                tool.unselect()
            return [72, 40]
        return canvas
class Pencil:
    """Freehand drawing tool; selected by default on startup."""
    def __init__(self):
        self.surf = pygame.image.load(openfile("assets/icons/pencil_selected.png")).convert_alpha()
        self.rect = self.surf.get_rect()
        self.select = True
    def draw(self, surface):
        surface.blit(self.surf, self.rect)
    def selected(self, pos, *tools):
        # Toggle on toolbar click; activating this tool deselects all others.
        if self.rect.collidepoint(pos) and self.select is False:
            for tool in tools:
                tool.select = False
                tool.unselect()
            self.surf = pygame.image.load(openfile("assets/icons/pencil_selected.png")).convert_alpha()
            self.select = True
        elif self.rect.collidepoint(pos) and self.select is True:
            self.surf = pygame.image.load(openfile("assets/icons/pencil.png")).convert_alpha()
            self.select = False
    def unselect(self):
        # Swap back to the plain icon when deselected by another tool.
        if self.select is False:
            self.surf = pygame.image.load(openfile("assets/icons/pencil.png")).convert_alpha()
    def select_(self):
        if self.select is True:
            self.surf = pygame.image.load(openfile("assets/icons/pencil_selected.png")).convert_alpha()
    def function(self, pos, canvas, canvas_pos, color):
        # Paint the clicked cell with the current color. The screen position
        # is mapped to a cell index (640/40 = 16px cells) adjusted by the
        # toolbar margin and the current canvas pan offset.
        if self.select is True:
            diff = canvas_pos[0] - 72, canvas_pos[1] - 40
            clicked_pos = int(round((pos[0] - diff[0]) / (640 / 40) - 5, 0)), \
                          int(round((pos[1] - diff[1]) / (640 / 40) - 3, 0))
            if clicked_pos[0] >= 0 and clicked_pos[1] >= 0:
                try:
                    canvas[clicked_pos[0]][clicked_pos[1]] = color
                except IndexError:
                    # clicks past the right/bottom edge are ignored
                    pass
        return canvas
class Eraser:
    """Erase tool: resets a clicked cell back to white."""
    def __init__(self):
        self.surf = pygame.image.load(openfile("assets/icons/eraser.png")).convert_alpha()
        self.rect = self.surf.get_rect(top=32)
        self.select = False
    def draw(self, surface):
        surface.blit(self.surf, self.rect)
    def selected(self, pos, *tools):
        # Toggle on toolbar click; activating this tool deselects all others.
        if self.rect.collidepoint(pos) and self.select is False:
            for tool in tools:
                tool.select = False
                tool.unselect()
            self.surf = pygame.image.load(openfile("assets/icons/eraser_selected.png")).convert_alpha()
            self.select = True
        elif self.rect.collidepoint(pos) and self.select is True:
            self.surf = pygame.image.load(openfile("assets/icons/eraser.png")).convert_alpha()
            self.select = False
    def unselect(self):
        if self.select is False:
            self.surf = pygame.image.load(openfile("assets/icons/eraser.png")).convert_alpha()
    def function(self, pos, canvas):
        # NOTE(review): unlike Pencil.function, this does not account for the
        # canvas pan offset (no canvas_pos parameter) — confirm whether erasing
        # on a panned canvas hits the intended cell.
        if self.select is True:
            clicked_pos = int(round(pos[0] / (640 / 40) - 5, 0)), int(round(pos[1] / (640 / 40) - 3, 0))
            if clicked_pos[0] >= 0 and clicked_pos[1] >= 0:
                try:
                    canvas[clicked_pos[0]][clicked_pos[1]] = [255, 255, 255]
                except IndexError:
                    pass
        return canvas
class Marquee:
    """Rectangular selection tool: drag out a cell region, then fill or clear it."""
    def __init__(self):
        self.surf = pygame.image.load(openfile("assets/icons/marquee.png")).convert_alpha()
        self.rect = self.surf.get_rect(top=2 * 32)
        self.initial = [0, 0]  # cell where the current drag started
        self.rect_select = [0, 0, 0, 0]  # selection rectangle in pixels
        self.diff = [0, 0]  # drag extent in cells
        self.select = False
        self.selection = [0, 0, 0, 0]  # selection rectangle in cell coordinates
    def draw(self, surface):
        surface.blit(self.surf, self.rect)
    def selected(self, pos, *tools):
        # Toggle on toolbar click; activating this tool deselects all others.
        if self.rect.collidepoint(pos) and self.select is False:
            for tool in tools:
                tool.select = False
                tool.unselect()
            self.surf = pygame.image.load(openfile("assets/icons/marquee_selected.png")).convert_alpha()
            self.select = True
        elif self.rect.collidepoint(pos) and self.select is True:
            self.surf = pygame.image.load(openfile("assets/icons/marquee.png")).convert_alpha()
            self.select = False
    def unselect(self):
        if self.select is False:
            self.surf = pygame.image.load(openfile("assets/icons/marquee.png")).convert_alpha()
    def set_initial(self, pos, canvas_pos):
        # Record the drag start cell (pan-adjusted) and clear the selection rect.
        diff = canvas_pos[0] - 72, canvas_pos[1] - 40
        self.initial = int(round((pos[0] - diff[0]) / (640 / 40) - 5, 0)), \
                       int(round((pos[1] - diff[1]) / (640 / 40) - 3, 0))
        return [0, 0, 0, 0]
    def function(self, pos, canvas_pos):
        # Update the selection rectangle while dragging; returns it in pixels.
        if self.select is True and pos[0] >= 72 and pos[1] >= 40:
            diff = canvas_pos[0] - 72, canvas_pos[1] - 40
            clicked_pos = int(round((pos[0] - diff[0]) / (640 / 40) - 5, 0)), \
                          int(round((pos[1] - diff[1]) / (640 / 40) - 3, 0))
            self.diff = [(self.initial[0] - clicked_pos[0]) * -1, (self.initial[1] - clicked_pos[1]) * -1]
            self.selection = [self.initial[0], self.initial[1],
                              self.initial[0] + self.diff[0], self.initial[1] + self.diff[1]]
            self.rect_select = [self.initial[0] * (640 // 40), self.initial[1] * (640 // 40),
                                self.diff[0] * (640 // 40), self.diff[1] * (640 // 40)]
        return self.rect_select
    def color_selection(self, color, canvas, delete=False):
        # Fill every cell of the current selection with `color`, or with white
        # when delete=True, then drop the selection.
        if not delete:
            for i in range(self.selection[0], self.selection[2]):
                for j in range(self.selection[1], self.selection[3]):
                    canvas[i][j] = color
            self.unselect_()
            return canvas
        else:
            for i in range(self.selection[0], self.selection[2]):
                for j in range(self.selection[1], self.selection[3]):
                    canvas[i][j] = [255, 255, 255]
            self.unselect_()
            return canvas
    def unselect_(self):
        # Clear both the pixel and the cell selection rectangles.
        self.rect_select = [0, 0, 0, 0]
        self.selection = [0, 0, 0, 0]
class Hand:
    """Pan tool: tracks a grab cell and reports the resulting canvas offset."""
    def __init__(self):
        self.surf = pygame.image.load(openfile("assets/icons/hand.png")).convert_alpha()
        self.rect = self.surf.get_rect(top=3 * 32)
        self.initial = [0, 0]  # cell where the drag started
        self.new_pos = [0, 0]  # pixel offset produced by the current drag
        self.select = False
    def draw(self, surface):
        surface.blit(self.surf, self.rect)
    def selected(self, pos, *tools):
        # Toggle on toolbar click; activating this tool deselects all others.
        if self.rect.collidepoint(pos) and self.select is False:
            for tool in tools:
                tool.select = False
                tool.unselect()
            self.surf = pygame.image.load(openfile("assets/icons/hand_selected.png")).convert_alpha()
            self.select = True
        elif self.rect.collidepoint(pos) and self.select is True:
            self.surf = pygame.image.load(openfile("assets/icons/hand.png")).convert_alpha()
            self.select = False
    def unselect(self):
        if self.select is False:
            self.surf = pygame.image.load(openfile("assets/icons/hand.png")).convert_alpha()
    def set_initial(self, pos):
        # Record the grid cell under the cursor when the drag begins.
        if self.select is True:
            if pos[0] > 72 and pos[1] > 40:
                self.initial = int(round(pos[0] / (640 / 40) - 5, 0)), int(round(pos[1] / (640 / 40) - 3, 0))
    def function(self, pos):
        # Return the pixel offset between the drag start cell and the current one.
        if self.select is True and pos[0] >= 72 and pos[1] >= 40:
            clicked_pos = int(round(pos[0] / (640 / 40) - 5, 0)), int(round(pos[1] / (640 / 40) - 3, 0))
            diff = [(self.initial[0] - clicked_pos[0]) * -1, (self.initial[1] - clicked_pos[1]) * -1]
            # removed leftover debug print(diff) that spammed stdout on every drag
            self.new_pos = [diff[0] * (640 // 40), diff[1] * (640 // 40)]
            return self.new_pos
class Gradient:
    """Tool that fills the whole 40x40 canvas with a random horizontal gradient."""
    def __init__(self):
        self.surf = pygame.image.load(openfile("assets/icons/gradient.png")).convert_alpha()
        self.rect = self.surf.get_rect(top=4 * 32)
        self.select = False
    def draw(self, surface):
        surface.blit(self.surf, self.rect)
    def selected(self, pos, *tools):
        # Toggle on toolbar click; activating this tool deselects all others.
        if self.rect.collidepoint(pos) and self.select is False:
            for tool in tools:
                tool.select = False
                tool.unselect()
            self.surf = pygame.image.load(openfile("assets/icons/gradient_selected.png")).convert_alpha()
            self.select = True
        elif self.rect.collidepoint(pos) and self.select is True:
            self.surf = pygame.image.load(openfile("assets/icons/gradient.png")).convert_alpha()
            self.select = False
    def unselect(self):
        if self.select is False:
            self.surf = pygame.image.load(openfile("assets/icons/gradient.png")).convert_alpha()
    def function(self):
        # Interpolate between two random RGB colors across 40 vertical lines
        # and return the result as a nested [x][y] color list.
        img = Image.new("RGB", (40, 40), "#FFFFFF")
        draw = ImageDraw.Draw(img)
        r, g, b = rint(0, 255), rint(0, 255), rint(0, 255)
        dr = (rint(0, 255) - r) / 40.
        dg = (rint(0, 255) - g) / 40.
        db = (rint(0, 255) - b) / 40.
        for i in range(40):
            r, g, b = r + dr, g + dg, b + db
            draw.line((i, 0, i, 40), fill=(int(r), int(g), int(b)))
        return numpy.array(img).tolist()
class Eyedropper:
    """Color-picker tool: samples the screen pixel under the mouse cursor."""
    def __init__(self):
        self.surf = pygame.image.load(openfile("assets/icons/eyedropper.png")).convert_alpha()
        self.rect = self.surf.get_rect(top=5 * 32)
        self.select = False
    def draw(self, surface):
        surface.blit(self.surf, self.rect)
    def selected(self, pos, *tools):
        # Toggle on toolbar click; activating this tool deselects all others.
        if self.rect.collidepoint(pos) and self.select is False:
            for tool in tools:
                tool.select = False
                tool.unselect()
            self.surf = pygame.image.load(openfile("assets/icons/eyedropper_selected.png")).convert_alpha()
            self.select = True
        elif self.rect.collidepoint(pos) and self.select is True:
            self.surf = pygame.image.load(openfile("assets/icons/eyedropper.png")).convert_alpha()
            self.select = False
    def unselect(self):
        if self.select is False:
            self.surf = pygame.image.load(openfile("assets/icons/eyedropper.png")).convert_alpha()
    def function(self):
        # Sample the screen color under the cursor via pyautogui (OS-level,
        # so it works outside the pygame window as well).
        if self.select is True:
            color = pyautogui.pixel(pyautogui.position()[0], pyautogui.position()[1])
            return color
class Recolor:
    """Toolbar toggle for the recolor action.

    NOTE(review): unlike the other tools this class has no function() method in
    this file — the actual recolor behavior is presumably handled by the caller
    when this tool is selected; confirm against the main loop.
    """
    def __init__(self):
        self.surf = pygame.image.load(openfile("assets/icons/recolor.png")).convert_alpha()
        self.rect = self.surf.get_rect(top=7 * 32)
        self.select = False
    def draw(self, surface):
        surface.blit(self.surf, self.rect)
    def selected(self, pos, *tools):
        # Toggle on toolbar click; activating this tool deselects all others.
        if self.rect.collidepoint(pos) and self.select is False:
            for tool in tools:
                tool.select = False
                tool.unselect()
            self.surf = pygame.image.load(openfile("assets/icons/recolor_selected.png")).convert_alpha()
            self.select = True
        elif self.rect.collidepoint(pos) and self.select is True:
            self.surf = pygame.image.load(openfile("assets/icons/recolor.png")).convert_alpha()
            self.select = False
    def unselect(self):
        if self.select is False:
            self.surf = pygame.image.load(openfile("assets/icons/recolor.png")).convert_alpha()
| 38.404531 | 109 | 0.573523 |
fde0271b4311da27a55a8e82046e84762d3be116 | 567 | py | Python | ML/Practice/Regression.py | jsinghvi/Weekend_Projects | 680835e6f8afdbf797e303b19597ed06ecc3c43e | [
"MIT"
] | null | null | null | ML/Practice/Regression.py | jsinghvi/Weekend_Projects | 680835e6f8afdbf797e303b19597ed06ecc3c43e | [
"MIT"
] | null | null | null | ML/Practice/Regression.py | jsinghvi/Weekend_Projects | 680835e6f8afdbf797e303b19597ed06ecc3c43e | [
"MIT"
] | null | null | null | import quandl
import pandas as pd
import math

# Pull Google's price history and keep only the adjusted OHLCV columns.
df = quandl.get('WIKI/GOOGL')
df = df[['Adj. Open','Adj. High','Adj. Low','Adj. Close','Adj. Volume']]
# Engineered features: intraday volatility and daily percent change.
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] * 100.0
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0
df = df[['Adj. Close','HL_PCT','PCT_change','Adj. Volume']]
forecast_col = 'Adj. Close'
# Outlier placeholder for missing values so rows are not dropped here.
df.fillna(-99999,inplace=True)
# Forecast horizon: 1% of the dataset length.
# fix: the original line was missing the closing parenthesis (SyntaxError).
forecast_out = int(math.ceil(0.01*len(df)))
# The label is the close price shifted forecast_out days into the future.
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)
print(df.tail())
| 33.352941 | 81 | 0.641975 |
4844e592675e1dcebced1456643635ebd52395ee | 568 | py | Python | server/djangoapp/admin.py | Ryllz/agfzb-CloudAppDevelopment_Capstone | a19c7c110aab16de0f73f3c309a1a0568e8ed967 | [
"Apache-2.0"
] | null | null | null | server/djangoapp/admin.py | Ryllz/agfzb-CloudAppDevelopment_Capstone | a19c7c110aab16de0f73f3c309a1a0568e8ed967 | [
"Apache-2.0"
] | null | null | null | server/djangoapp/admin.py | Ryllz/agfzb-CloudAppDevelopment_Capstone | a19c7c110aab16de0f73f3c309a1a0568e8ed967 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import CarModel, CarMake
# CarModelInline class
class CarModelInline(admin.StackedInline):
    # Edit CarModel records stacked inline on the parent admin page.
    model = CarModel
    extra = 5  # number of blank inline forms rendered by default
# CarModelAdmin class
class CarModelAdmin(admin.ModelAdmin):
    # Fields shown on the CarModel add/change form, in this order.
    fields = ['manufacturer', 'name', 'dealer_id','car_type','year']
# CarMakeAdmin class with CarModelInline
class CarMakeAdmin(admin.ModelAdmin):
    # Fields shown on the CarMake form; related CarModels are edited inline.
    fields = ['name', 'desc']
    inlines = [CarModelInline]
# Register models here
admin.site.register(CarModel, CarModelAdmin)
admin.site.register(CarMake, CarMakeAdmin) | 28.4 | 72 | 0.742958 |
b3369987527db2f93c22c27fef14fd92fe40ee1b | 21,445 | py | Python | integration-tests/bot/chainbot.py | crypto-raymond/chain-1 | f0b612fc5cadf42308d3377f364f5751cca893a6 | [
"Apache-2.0"
] | null | null | null | integration-tests/bot/chainbot.py | crypto-raymond/chain-1 | f0b612fc5cadf42308d3377f364f5751cca893a6 | [
"Apache-2.0"
] | null | null | null | integration-tests/bot/chainbot.py | crypto-raymond/chain-1 | f0b612fc5cadf42308d3377f364f5751cca893a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
import base64
import hashlib
import json
import asyncio
import tempfile
from pathlib import Path
import re
import os
import configparser
import binascii
# import time
import shutil
import jsonpatch
import fire
import toml
import nacl.signing
from nacl.encoding import HexEncoder
PASSPHRASE = '123456'
class SigningKey:
    """Thin wrapper around an ed25519 signing key derived from a hex seed.

    Exposes the encodings that tendermint key/config files expect.
    """

    def __init__(self, seed):
        # keep the original hex seed for reproducibility
        self._seed = seed
        self._sk = nacl.signing.SigningKey(seed, HexEncoder)

    def priv_key_base64(self):
        """Raw private key bytes as base64 (tendermint PrivKeyEd25519 value)."""
        raw = self._sk._signing_key
        return base64.b64encode(raw).decode()

    def pub_key_base64(self):
        """Public verify key as base64 (tendermint PubKeyEd25519 value)."""
        public = bytes(self._sk.verify_key)
        return base64.b64encode(public).decode()

    def validator_address(self):
        """Validator address: first 40 hex chars of SHA256(pubkey), uppercased."""
        public = bytes(self._sk.verify_key)
        digest = hashlib.sha256(public).hexdigest()
        return digest[:40].upper()
def tendermint_cfg(moniker, app_port, rpc_port, p2p_port, peers):
    """Build the config.toml contents for one tendermint node.

    Args:
        moniker: display name of the node
        app_port: local chain-abci (proxy app) port
        rpc_port: tendermint RPC listen port
        p2p_port: tendermint P2P listen port
        peers: comma-separated persistent-peers string
    """
    return {
        'proxy_app': 'tcp://127.0.0.1:%d' % app_port,
        'moniker': moniker,
        'fast_sync': True,
        'db_backend': 'goleveldb',
        'db_dir': 'data',
        'log_level': 'main:info,state:info,*:error',
        'log_format': 'plain',
        'genesis_file': 'config/genesis.json',
        'priv_validator_key_file': 'config/priv_validator_key.json',
        'priv_validator_state_file': 'data/priv_validator_state.json',
        'priv_validator_laddr': '',
        'node_key_file': 'config/node_key.json',
        'abci': 'socket',
        'prof_laddr': '',
        'filter_peers': False,
        # RPC server settings
        'rpc': {
            'laddr': 'tcp://0.0.0.0:%d' % rpc_port,
            'cors_allowed_origins': [],
            'cors_allowed_methods': [
                'HEAD',
                'GET',
                'POST'
            ],
            'cors_allowed_headers': [
                'Origin',
                'Accept',
                'Content-Type',
                'X-Requested-With',
                'X-Server-Time'
            ],
            'grpc_laddr': '',
            'grpc_max_open_connections': 900,
            'unsafe': False,
            'max_open_connections': 900,
            'max_subscription_clients': 100,
            'max_subscriptions_per_client': 5,
            'timeout_broadcast_tx_commit': '10s',
            'max_body_bytes': 1000000,
            'max_header_bytes': 1048576,
            'tls_cert_file': '',
            'tls_key_file': ''
        },
        # peer-to-peer networking (addr_book_strict off for local/devnet IPs)
        'p2p': {
            'laddr': 'tcp://0.0.0.0:%d' % p2p_port,
            'external_address': '',
            'seeds': '',
            'persistent_peers': peers,
            'upnp': False,
            'addr_book_file': 'config/addrbook.json',
            'addr_book_strict': False,
            'max_num_inbound_peers': 40,
            'max_num_outbound_peers': 10,
            'flush_throttle_timeout': '100ms',
            'max_packet_msg_payload_size': 1024,
            'send_rate': 5120000,
            'recv_rate': 5120000,
            'pex': True,
            'seed_mode': False,
            'private_peer_ids': '',
            'allow_duplicate_ip': True,
            'handshake_timeout': '20s',
            'dial_timeout': '3s'
        },
        'mempool': {
            'recheck': True,
            'broadcast': True,
            'wal_dir': '',
            'size': 5000,
            'max_txs_bytes': 1073741824,
            'cache_size': 10000,
            'max_tx_bytes': 1048576
        },
        'fastsync': {'version': 'v0'},
        # consensus timeouts (1s commit keeps devnet blocks fast)
        'consensus': {
            'wal_file': 'data/cs.wal/wal',
            'timeout_propose': '3s',
            'timeout_propose_delta': '500ms',
            'timeout_prevote': '1s',
            'timeout_prevote_delta': '500ms',
            'timeout_precommit': '1s',
            'timeout_precommit_delta': '500ms',
            'timeout_commit': '1s',
            'skip_timeout_commit': False,
            'create_empty_blocks': True,
            'create_empty_blocks_interval': '0s',
            'peer_gossip_sleep_duration': '100ms',
            'peer_query_maj23_sleep_duration': '2s'
        },
        # index every tx key so integration tests can query by any tag
        'tx_index': {
            'indexer': 'kv',
            # 'index_tags': '',
            'index_all_keys': True
        },
        'instrumentation': {
            'prometheus': False,
            'prometheus_listen_addr': ':26660',
            'max_open_connections': 3,
            'namespace': 'tendermint'
        }
    }
def priv_validator_key(seed):
    """Build the tendermint priv_validator_key.json structure for a seed."""
    signer = SigningKey(seed)
    key_doc = {'address': signer.validator_address()}
    key_doc['pub_key'] = {
        'type': 'tendermint/PubKeyEd25519',
        'value': signer.pub_key_base64(),
    }
    key_doc['priv_key'] = {
        'type': 'tendermint/PrivKeyEd25519',
        'value': signer.priv_key_base64(),
    }
    return key_doc
def node_key(seed):
    """Build the tendermint node_key.json structure for a seed."""
    signer = SigningKey(seed)
    return {
        'priv_key': {
            'type': 'tendermint/PrivKeyEd25519',
            'value': signer.priv_key_base64(),
        }
    }
def extract_enckey(s):
    """Pull the hex authentication token out of process output bytes."""
    token_pattern = re.compile(rb'Authentication token: ([0-9a-fA-F]+)')
    match = token_pattern.search(s)
    return match.group(1).decode()
def app_state_cfg(cfg):
    """Build the genesis app_state dict from the bot config.

    Every bonded node becomes a council node; each one carries a mock MLS
    keypackage taken from the enclave test vectors (see FIXME below).
    """
    # fix: read the keypackage without leaking a file handle
    # (was: open(...).read() with no close)
    keypackage_path = (
        Path(os.path.dirname(__file__))
        / '../../chain-tx-enclave-next/mls/tests/test_vectors/keypackage.bin'
    )
    mock_keypackage = keypackage_path.read_bytes()
    return {
        "distribution": gen_distribution(cfg),
        "required_council_node_stake": "100000000",  # 10 coins
        "jailing_config": {
            "block_signing_window": 20,
            "missed_block_threshold": 5
        },
        "slashing_config": {
            "liveness_slash_percent": "0.1",
            "byzantine_slash_percent": "0.2",
        },
        "rewards_config": {
            "monetary_expansion_cap": str(cfg['expansion_cap']),
            "reward_period_seconds": 86400,
            "monetary_expansion_r0": 450,
            "monetary_expansion_tau": 145000000000000000,
            "monetary_expansion_decay": 999860
        },
        "initial_fee_policy": {
            "base_fee": "1.1",
            "per_byte_fee": "1.25"
        },
        "evidence": {
            "max_age_duration": "172800000000003",
            "max_age_num_blocks": "100004"
        },
        # staking address -> [name, email, consensus pubkey, keypackage]
        "council_nodes": {
            node['staking'][0]: [
                node['name'],
                '%s@example.com' % node['name'],
                {
                    'type': 'tendermint/PubKeyEd25519',
                    'value': SigningKey(node['validator_seed']).pub_key_base64(),
                },
                {'keypackage': base64.b64encode(mock_keypackage).decode()}  # FIXME: to be designed and implemented
            ]
            for node in cfg['nodes'] if node['bonded_coin'] > 0
        },
        "genesis_time": cfg['genesis_time'],
    }
def programs(node, app_hash, root_path, cfg):
    """Render the supervisord program sections for one node's processes.

    Each node runs (outside mock mode) a tx-query enclave runner, plus
    chain-abci, tendermint and client-rpc; ports are derived from
    node['base_port'].
    """
    node_path = root_path / Path(node['name'])
    base_port = node['base_port']
    # port layout relative to base_port
    tx_validation_port = base_port + 0
    tx_query_port = base_port + 1
    tendermint_rpc_port = base_port + 7
    chain_abci_port = base_port + 8
    client_rpc_port = base_port + 9
    def_env = {
        'RUST_BACKTRACE': '1',
        'RUST_LOG': 'info',
        'SGX_MODE': 'HW',
    }
    commands = []
    # the enclave tx-query service is skipped entirely in mock mode
    if not cfg.get('mock_mode'):
        commands.append((
            'tx-query',
            f"tx-query2-app-runner --enclave-path 'tx-query2-enclave-app.sgxs' --address 127.0.0.1:{tx_query_port} --zmq-conn-str tcp://127.0.0.1:{tx_validation_port} --sp-address 127.0.0.1:8989",
            dict(def_env),
        ))
    commands.append((
        'chain-abci',
        f"chain-abci -g {app_hash} -c {cfg['chain_id']} --enclave_server tcp://127.0.0.1:{tx_validation_port} --data {node_path / Path('chain')} -p {chain_abci_port} --tx_query 127.0.0.1:{tx_query_port}",
        def_env,
    ))
    commands.append((
        'tendermint',
        f"tendermint node --home={node_path / Path('tendermint')} --proxy_app=127.0.0.1:{chain_abci_port} --rpc.laddr=tcp://0.0.0.0:{tendermint_rpc_port}",
        def_env,
    ))
    commands.append((
        'client-rpc',
        f"client-rpc --port={client_rpc_port} --chain-id={cfg['chain_id']} "
        f"--storage-dir={node_path / Path('wallet')} "
        f"--websocket-url=ws://127.0.0.1:{tendermint_rpc_port}/websocket "
        f"--disable-fast-forward",
        dict(def_env, CRYPTO_GENESIS_FINGERPRINT=cfg['genesis_fingerprint']),
    ))
    sections = {}
    for priority, (name, cmd, env) in enumerate(commands):
        # client-rpc only autostarts when explicitly requested in the config
        autostart = 'true'
        if name == 'client-rpc' and not cfg.get('start_client_rpc'):
            autostart = 'false'
        sections['program:%s-%s' % (name, node['name'])] = {
            'command': cmd,
            'stdout_logfile': f"%(here)s/logs/{name}-%(group_name)s.log",
            'environment': ','.join(f'{k}={v}' for k, v in env.items()),
            'autostart': autostart,
            'autorestart': 'true',
            'redirect_stderr': 'true',
            'priority': str(priority + 1),
            'startsecs': '3',
            'startretries': '10',
        }
    return sections
def tasks_ini(node_cfgs, app_hash, root_path, cfg):
    """Assemble the full supervisord config dict: shared supervisor sections,
    global services, then one program group per node."""
    ini = {}
    ini['supervisord'] = {'pidfile': '%(here)s/supervisord.pid'}
    ini['rpcinterface:supervisor'] = {
        'supervisor.rpcinterface_factory': 'supervisor.rpcinterface:make_main_rpcinterface',
    }
    ini['unix_http_server'] = {'file': '%(here)s/supervisor.sock'}
    ini['supervisorctl'] = {'serverurl': 'unix://%(here)s/supervisor.sock'}
    if not cfg.get('mock_mode'):
        # real SGX mode also needs the remote-attestation service provider
        ini['program:ra-sp-server'] = {
            'command': f'ra-sp-server --quote-type Unlinkable --ias-key {os.environ["IAS_API_KEY"]} --spid {os.environ["SPID"]}',
            'stdout_logfile': '%(here)s/logs/ra-sp-server.log',
            'autostart': 'true',
            'autorestart': 'true',
            'redirect_stderr': 'true',
            'priority': '10',
            'startsecs': '3',
            'startretries': '10',
        }
    ini['program:mock_hardware_key_storage'] = {
        'command': 'mock_hardware_wallet',
        'stdout_logfile': '%(here)s/logs/mock_hardware_key_storage.log',
        'autostart': 'true',
        'autorestart': 'true',
        'redirect_stderr': 'true',
        'priority': '10',
        'startsecs': '3',
        'startretries': '10',
    }
    for node in node_cfgs:
        node_programs = programs(node, app_hash, root_path, cfg)
        # group section lists the program names (sans the 'program:' prefix)
        members = [key.split(':', 1)[1] for key in node_programs]
        ini['group:%s' % node['name']] = {'programs': ','.join(members)}
        ini.update(node_programs)
    return ini
def write_tasks_ini(fp, cfg):
    """Serialize the nested section -> options mapping as an ini file on fp."""
    parser = configparser.ConfigParser()
    for section_name, options in cfg.items():
        parser.add_section(section_name)
        parser[section_name].update(options)
    parser.write(fp)
def coin_to_voting_power(coin):
    """Convert a base-unit coin amount to tendermint voting power (1 power = 10**8 base units).

    Uses integer floor division instead of the old `int(int(coin) / 10**8)`:
    true division goes through a float, which loses precision above 2**53 —
    supply-scale amounts here (e.g. the 1.45e17 expansion cap) exceed that.
    """
    return int(coin) // 10 ** 8
async def run(cmd, ignore_error=False, **kwargs):
    """Run *cmd* through the shell and wait for completion.

    :param cmd: shell command line
    :param ignore_error: when False, assert that the command exited with 0
    :param kwargs: forwarded to asyncio.create_subprocess_shell
    """
    process = await asyncio.create_subprocess_shell(cmd, **kwargs)
    returncode = await process.wait()
    if ignore_error:
        return
    assert returncode == 0, cmd
async def interact(cmd, input=None, **kwargs):
    """Run *cmd* through the shell, feed it *input* on stdin, return stdout.

    :param cmd: shell command line
    :param input: optional bytes written to the process's stdin
    :param kwargs: forwarded to asyncio.create_subprocess_shell
    :return: captured stdout as bytes
    :raises AssertionError: if the command exits non-zero (message includes
        the captured output and the command line)
    """
    process = await asyncio.create_subprocess_shell(
        cmd,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        **kwargs
    )
    stdout, _stderr = await process.communicate(input=input)
    assert process.returncode == 0, f'{stdout.decode("utf-8")} ({cmd})'
    return stdout
async def fix_genesis(genesis, cfg):
    """Post-process a tendermint genesis document with the dev-utils tool.

    Writes *genesis* and the dev config *cfg* to temp files, lets
    ``dev-utils genesis generate`` patch the genesis file in place, then
    reads the fingerprint and the patched document back.

    :param genesis: genesis document (dict) to patch
    :param cfg: dev config dict passed to dev-utils
    :return: (genesis_fingerprint, patched_genesis_dict)
    :raises Exception: if dev-utils returns an empty fingerprint
    """
    with tempfile.NamedTemporaryFile('w') as fp_genesis:
        json.dump(genesis, fp_genesis)
        fp_genesis.flush()
        with tempfile.NamedTemporaryFile('w') as fp_cfg:
            json.dump(cfg, fp_cfg)
            fp_cfg.flush()
            await run(
                f'dev-utils genesis generate --in_place --no_backup --no_genesistime_overwrite --no_evidence_overwrite '
                f'--genesis_dev_config_path "{fp_cfg.name}" '
                f'--tendermint_genesis_path "{fp_genesis.name}"'
            )
            genesis_fingerprint = (await interact(
                f'dev-utils genesis fingerprint -t "{fp_genesis.name}"'
            )).decode().strip()
            if not genesis_fingerprint:
                raise Exception("get genesis fingerprint failed")
            # Close the patched file explicitly instead of leaking the
            # handle returned by a bare open() in the return expression.
            with open(fp_genesis.name) as patched:
                return genesis_fingerprint, json.load(patched)
async def gen_genesis(cfg):
    """Build the tendermint genesis document for the cluster.

    Assembles a skeleton genesis, applies ``cfg['chain_config_patch']``
    (a JSON-patch) to the app state produced by ``app_state_cfg`` (defined
    elsewhere in this file), and runs it through ``fix_genesis``. As a side
    effect, stores the resulting fingerprint in
    ``cfg['genesis_fingerprint']``.

    :param cfg: full cluster config dict
    :return: the finalized genesis document (dict)
    """
    genesis = {
        "genesis_time": cfg['genesis_time'],
        "chain_id": cfg['chain_id'],
        "consensus_params": {
            "block": {
                "max_bytes": "22020096",
                "max_gas": "-1",
                "time_iota_ms": "1000"
            },
            "evidence": {
                "max_age_num_blocks": "100000",
                "max_age_duration": "20000000000"
            },
            "validator": {
                "pub_key_types": [
                    "ed25519"
                ]
            }
        },
        'validators': [],
    }
    # jsonpatch is a third-party RFC 6902 implementation.
    patch = jsonpatch.JsonPatch(cfg['chain_config_patch'])
    cfg['genesis_fingerprint'], genesis = await fix_genesis(genesis, patch.apply(app_state_cfg(cfg)))
    return genesis
def gen_validators(cfgs):
    """Return one (staking_address, signing_key, voting_power, name) tuple
    per node config in *cfgs*."""
    validators = []
    for node_cfg in cfgs:
        validators.append((
            node_cfg['staking'][0],
            SigningKey(node_cfg['validator_seed']),
            coin_to_voting_power(node_cfg['bonded_coin']),
            node_cfg['name'],
        ))
    return validators
def gen_distribution(cfg):
    """Build the genesis coin distribution: address -> amount (as string).

    Each node's bonded coins go to its first staking address and its
    unbonded coins to the second; any supply left after the expansion cap
    is assigned to the all-zero (burn) address.
    """
    dist = {}
    for node in cfg['nodes']:
        dist[node['staking'][0]] = str(node['bonded_coin'])
    # burn extra coins
    max_coin = 10000000000000000000
    total_dist = 0
    for node in cfg['nodes']:
        total_dist += node['bonded_coin'] + node['unbonded_coin']
    assert max_coin >= total_dist
    burned = max_coin - total_dist - cfg['expansion_cap']
    if burned > 0:
        dist['0x0000000000000000000000000000000000000000'] = str(burned)
    for node in cfg['nodes']:
        dist[node['staking'][1]] = str(node['unbonded_coin'])
    return dist
def gen_peers(cfgs):
    """Return the comma-separated tendermint persistent-peers string,
    one ``tcp://<node_id>@<host>:<p2p_port>`` entry per node (P2P port is
    base_port + 6)."""
    entries = []
    for node_cfg in cfgs:
        node_id = SigningKey(node_cfg['node_seed']).validator_address().lower()
        entries.append('tcp://%s@%s:%d' % (
            node_id,
            node_cfg['hostname'],
            node_cfg['base_port'] + 6
        ))
    return ','.join(entries)
async def init_wallet(wallet_root, mnemonic, chain_id, staking_count, transfer_count):
    """Restore a client-cli wallet from *mnemonic* and create addresses.

    Drives the external ``client-cli`` binary over stdin (PASSPHRASE and
    extract_enckey are defined elsewhere in this file).

    :param wallet_root: wallet storage directory
        (NOTE(review): passed into the subprocess environment -- looks like
        it must be a plain string, confirm callers don't pass a Path)
    :param mnemonic: BIP-39 recovery phrase
    :param chain_id: chain id exported as CRYPTO_CHAIN_ID
    :param staking_count: number of staking addresses to create
    :param transfer_count: number of transfer addresses to create
    :return: (staking_addresses, transfer_addresses) lists of strings
    """
    env = dict(
        os.environ,
        CRYPTO_CLIENT_STORAGE=wallet_root,
        CRYPTO_CHAIN_ID=chain_id
    )
    # Restore prompts for passphrase twice, then the mnemonic twice.
    stdout = await interact(
        f'client-cli wallet restore --name Default',
        ('%s\n%s\n%s\n%s\n' % (
            PASSPHRASE, PASSPHRASE, mnemonic, mnemonic
        )).encode(),
        env=env,
    )
    enckey = extract_enckey(stdout)
    staking_addresses = []
    for _ in range(staking_count):
        result = await interact(
            f'client-cli address new --name Default --type Staking',
            ('%s\n' % enckey).encode(),
            env=env,
        )
        # Staking addresses are 0x-prefixed hex printed in the CLI output.
        staking_addresses.append(re.search(r'0x[0-9a-zA-Z]+', result.decode()).group())
    transfer_addresses = []
    for _ in range(transfer_count):
        result = await interact(
            f'client-cli address new --name Default --type Transfer',
            ('%s\n' % enckey).encode(),
            env=env,
        )
        # Transfer addresses are bech32 strings with the "dcro" prefix.
        transfer_addresses.append(re.search(r'dcro[0-9a-zA-Z]+', result.decode()).group())
    return staking_addresses, transfer_addresses
async def init_cluster(cfg):
    """Materialize the whole devnet on disk under ``cfg['root_path']``.

    Recreates the root directory, restores one wallet per node, generates
    the shared genesis, then writes per-node tendermint config/key/state
    files plus the supervisord ``tasks.ini``.

    :param cfg: full cluster config dict (mutated: node staking/transfer
        addresses, node_id and genesis fingerprint are filled in)
    """
    root_path = Path(cfg['root_path']).resolve()
    if root_path.exists():
        print('root path(%s) exists, remove it first' % root_path)
        shutil.rmtree(root_path)
    root_path.mkdir()
    # init wallet and populate node fields
    for i, node in enumerate(cfg['nodes']):
        node['node_id'] = SigningKey(node['node_seed']).validator_address().lower()
        wallet_path = root_path / Path('node%d' % i) / Path('wallet')
        os.makedirs(wallet_path)
        node['staking'], node['transfer'] = \
            await init_wallet(wallet_path, node['mnemonic'], cfg['chain_id'], 2, 2)
    peers = gen_peers(cfg['nodes'])
    genesis = await gen_genesis(cfg)
    app_hash = genesis['app_hash']
    # NOTE(review): the open() handles passed to json.dump below are never
    # closed explicitly -- relies on CPython refcounting; consider `with`.
    json.dump(
        cfg,
        open(root_path / Path('info.json'), 'w'),
        indent=4
    )
    for i, node in enumerate(cfg['nodes']):
        base_port = node['base_port']
        node_name = 'node%d' % i
        cfg_path = root_path / Path(node_name) / Path('tendermint') / Path('config')
        os.makedirs(cfg_path)
        json.dump(genesis,
                  open(cfg_path / Path('genesis.json'), 'w'),
                  indent=4)
        json.dump(node_key(node['node_seed']),
                  open(cfg_path / Path('node_key.json'), 'w'),
                  indent=4)
        json.dump(node_key(node['validator_seed']),
                  open(cfg_path / Path('priv_validator_key.json'), 'w'),
                  indent=4)
        # Apply the user-supplied JSON-patch on top of the default
        # tendermint config (ports: +8 rpc, +7 abci/proxy, +6 p2p).
        patch = jsonpatch.JsonPatch(cfg['tendermint_config_patch'])
        toml.dump(
            patch.apply(
                tendermint_cfg(
                    node_name,
                    base_port + 8,
                    base_port + 7,
                    base_port + 6,
                    peers
                )
            ),
            open(cfg_path / Path('config.toml'), 'w')
        )
        data_path = root_path / Path(node_name) / Path('tendermint') / Path('data')
        if not data_path.exists():
            data_path.mkdir()
        # Fresh validator starts from height 0 / round 0 / step 0.
        json.dump({
            "height": "0",
            "round": "0",
            "step": 0
        }, open(data_path / Path('priv_validator_state.json'), 'w'))
    logs_path = root_path / Path('logs')
    if not logs_path.exists():
        logs_path.mkdir()
    write_tasks_ini(open(root_path / Path('tasks.ini'), 'w'),
                    tasks_ini(cfg['nodes'], app_hash, root_path, cfg))
def gen_mnemonic():
    """Return a fresh 160-bit BIP-39 English mnemonic phrase."""
    from mnemonic import Mnemonic
    return Mnemonic('english').generate(160)
def gen_seed():
    """Return 32 cryptographically random bytes as a lowercase hex string."""
    return os.urandom(32).hex()
class CLI:
    """python-fire command groups for generating and preparing a devnet."""

    def _gen(self, count=1, expansion_cap=1000000000000000000,
             dist=1000000000000000000,
             genesis_time="2019-11-20T08:56:48.618137Z",
             base_fee='0.0', per_byte_fee='0.0',
             base_port=26650,
             chain_id='test-chain-y3m1e6-AB', root_path='./data', hostname='127.0.0.1',
             mock_mode=False):
        '''Generate testnet node specification
        :param count: Number of nodes, [default: 1].
        :param expansion_cap: Coins reserved for reward expansion.
        :param dist: Total coins distributed evenly across the nodes.
        :param base_port: First node's base port; nodes are spaced 10 apart.
        :param hostname: Hostname template; may contain "{index}".
        '''
        # Integer arithmetic: the previous int(dist / count / 2) went
        # through float division and lost precision for coin amounts whose
        # quotient exceeds 2**53 (e.g. count=3 with the default dist).
        share = int(dist) // (count * 2)
        cfg = {
            'mock_mode': mock_mode,
            'root_path': root_path,
            'chain_id': chain_id,
            'genesis_time': genesis_time,
            'expansion_cap': expansion_cap,
            'nodes': [
                {
                    'name': 'node%d' % i,
                    'hostname': hostname.format(index=i),
                    'mnemonic': gen_mnemonic(),
                    'validator_seed': gen_seed(),
                    'node_seed': gen_seed(),
                    'bonded_coin': share,
                    'unbonded_coin': share,
                    'base_port': base_port + (i * 10),
                }
                for i in range(count)
            ],
            'chain_config_patch': [
                {'op': 'replace', 'path': '/initial_fee_policy/base_fee', 'value': base_fee},
                {'op': 'replace', 'path': '/initial_fee_policy/per_byte_fee', 'value': per_byte_fee},
            ],
            'tendermint_config_patch': [
                {'op': 'replace', 'path': '/consensus/create_empty_blocks', 'value': True},
                {'op': 'add', 'path': '/consensus/create_empty_blocks_interval', 'value': '0s'},
            ],
        }
        return cfg

    def gen(self, count=1, expansion_cap=1000000000000000000,
            dist=1000000000000000000,
            genesis_time="2019-11-20T08:56:48.618137Z",
            base_fee='0.0', per_byte_fee='0.0',
            base_port=26650,
            chain_id='test-chain-y3m1e6-AB', root_path='./data', hostname='127.0.0.1',
            mock_mode=False):
        '''Generate a testnet node specification and print it as JSON.

        Same parameters as _gen; mock mode is currently unsupported.
        '''
        if mock_mode:
            print("TODO: mock mode is pending a revision")
            sys.exit(1)
        cfg = self._gen(
            count, expansion_cap, dist, genesis_time,
            base_fee, per_byte_fee, base_port,
            chain_id, root_path, hostname, mock_mode
        )
        return json.dumps(cfg, indent=4)

    def _prepare(self, cfg):
        # Bridge the synchronous fire entry point into the async world.
        asyncio.run(init_cluster(cfg))

    def prepare(self, spec=None, base_port=None, mock_mode=None, start_client_rpc=None):
        '''Prepare tendermint testnet based on specification
        :param spec: Path of specification file, [default: stdin]
        :param base_port: Override the spec's base port (nodes spaced 10 apart).
        :param mock_mode: Override the spec's mock_mode flag.
        :param start_client_rpc: Override whether client-rpc autostarts.
        '''
        cfg = json.load(open(spec) if spec else sys.stdin)
        if base_port is not None:
            for i, node in enumerate(cfg['nodes']):
                node['base_port'] = base_port + i * 10
        if mock_mode is not None:
            cfg['mock_mode'] = mock_mode
        if start_client_rpc is not None:
            cfg['start_client_rpc'] = start_client_rpc
        self._prepare(cfg)
        # Fixed typo in the status message ("succesfully").
        print(
            'Prepared successfully',
            cfg['root_path'],
            cfg['nodes'][0]['base_port'],
            cfg.get('mock_mode') and 'MOCK' or 'SGX'
        )
if __name__ == '__main__':
    # Expose the CLI subcommands (gen, prepare) via python-fire.
    fire.Fire(CLI())
| 33.351477 | 229 | 0.545069 |
879b5b6a238b3dbef41ac364fbb86a957f943836 | 7,103 | py | Python | rockyroad/driver.py | kevbradwick/rockyroad | d0b9da7273b028d5435a68b0e4c409aa882f49a2 | [
"BSD-3-Clause"
] | null | null | null | rockyroad/driver.py | kevbradwick/rockyroad | d0b9da7273b028d5435a68b0e4c409aa882f49a2 | [
"BSD-3-Clause"
] | null | null | null | rockyroad/driver.py | kevbradwick/rockyroad | d0b9da7273b028d5435a68b0e4c409aa882f49a2 | [
"BSD-3-Clause"
] | null | null | null | import errno
import glob
import platform
import re
import sys
import tempfile
import zipfile
from contextlib import contextmanager
from distutils.version import StrictVersion
import os
import requests
from xml.etree import ElementTree
IS_64_BIT = sys.maxsize > 2**32
IS_LINUX = platform.system().lower() == 'linux'
IS_WINDOWS = platform.system().lower() == 'windows'
IS_MAC = platform.system().lower() == 'darwin'
UNKNOWN_PLATFORM = not IS_LINUX and not IS_WINDOWS
REPO_DIR = os.path.join(os.path.expanduser('~'), '.rockyroad')
@contextmanager
def download_file(url):
    """
    Download a remote file to a temporary location.

    Context manager yielding the path of the temp file. The file is fully
    written and *closed* before the path is handed out (previously it was
    still open and possibly unflushed, which breaks consumers on Windows),
    and it is removed when the context exits (previously the
    ``delete=False`` temp file was leaked forever).

    :param url: the file url
    """
    resp = requests.get(url, stream=True)
    with tempfile.NamedTemporaryFile(delete=False) as fp:
        name = fp.name
        for chunk in resp.iter_content(chunk_size=1024):
            if chunk:
                fp.write(chunk)
    try:
        yield name
    finally:
        os.remove(name)
def _mkdirp(dirpath):
try:
os.makedirs(dirpath)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(dirpath):
pass
def _get_xml_ns(uri):
m = re.match(r'\{.*?\}', uri)
return m.group(0) if m else ''
class Driver:
    """Base class for a webdriver binary manager.

    Subclasses implement ``download()`` and ``binary_path()`` and may
    provide a ``setup()`` hook, which runs at the end of ``__init__``.
    """

    # Requested driver version (string) or None for "latest".
    version = None
    # Target architecture: '32' or '64'.
    bit = None
    # Local cache directory for downloaded drivers.
    repo_dir = os.path.join(os.path.expanduser('~'), '.rockyroad')

    def __init__(self, version=None, bit=None):
        if version:
            self.version = str(version)
        self.bit = str(bit) if bit else ('64' if IS_64_BIT else '32')
        setup = getattr(self, 'setup', None)
        if setup is not None:
            setup()

    def download(self):
        """Fetch the driver binary into the local cache."""
        raise NotImplementedError('You must implement download()')

    def binary_path(self):
        """Return the absolute path where the driver binary lives."""
        raise NotImplementedError('You must implement binary_path()')

    def path(self):
        """Return the binary path, downloading the driver first if missing."""
        if not os.path.exists(self.binary_path()):
            self.download()
        return self.binary_path()
class ChromeDriver(Driver):
    """Driver manager for chromedriver binaries.

    At construction time (``Driver.__init__`` -> ``setup``) it scrapes the
    chromedriver storage bucket's XML index into a nested
    ``{version: {platform: {bit: url}}}`` mapping.
    """

    # {version: {platform: {bit: download_url}}}, filled by setup().
    versions = {}
    _bin_path = None

    def setup(self):
        """Populate self.versions from the chromedriver storage index."""
        url = 'https://chromedriver.storage.googleapis.com/'
        resp = requests.get(url)
        tree = ElementTree.fromstring(resp.content)
        ns = _get_xml_ns(tree.tag)
        for elem in tree.findall('%sContents' % ns):
            key = elem.find('%sKey' % ns)
            m = re.match(r'^([\d.]+?)/chromedriver_(linux|mac|win)(32|64)', key.text)
            if m:
                v = m.group(1)  # version
                p = m.group(2)  # platform
                b = m.group(3)  # bit
                self.versions.setdefault(v, {}).setdefault(p, {})[b] = url + key.text

    @property
    def _platform(self):
        """Short platform tag used in the download URLs."""
        if IS_WINDOWS:
            return 'win'
        elif IS_LINUX:
            return 'linux'
        elif IS_MAC:
            return 'mac'
        else:
            raise RuntimeError('Unable to detect current platform')

    def binary_path(self):
        """Resolve (and cache) the expected location of the chromedriver binary.

        Also validates self.version, defaulting it to the newest published
        version when unset.
        """
        if self._bin_path:
            return self._bin_path
        if self.version and self.version not in self.versions:
            raise RuntimeError('Chromedriver %s does not exist' % self.version)
        if not self.version:
            numbers = list(self.versions.keys())
            numbers.sort(key=StrictVersion, reverse=True)
            self.version = numbers[0]
        bin_name = 'chromedriver.exe' if IS_WINDOWS else 'chromedriver'
        self._bin_path = os.path.join(REPO_DIR, 'chromedriver', '%s-%s%s' %
                                      (self.version, self._platform, self.bit,),
                                      bin_name)
        return self._bin_path

    def download(self):
        """Download and unpack the chromedriver archive into the cache dir."""
        # Resolve the binary path first: it validates/defaults self.version
        # and fills self._bin_path. (Previously download() crashed when
        # called before binary_path(), and computed the destination by
        # stripping os.pathsep characters -- not a directory at all.)
        destination_dir = os.path.dirname(self.binary_path())
        url = self.versions[self.version][self._platform][self.bit]
        with download_file(url) as name:
            _mkdirp(destination_dir)
            with zipfile.ZipFile(name, 'r') as archive:
                archive.extractall(destination_dir)
            for filename in glob.iglob(destination_dir + '/*'):
                # Executable permissions: 0o755, not decimal 777 (which is
                # the meaningless mode 0o1411).
                os.chmod(filename, 0o755)
def download_chromedriver(version=None, bit=None):
    """
    Download the chromedriver binary into the local repo cache.

    If version is not set, then it will get the latest one. If the bit value
    is not set then it will use the same value as the current system.

    :param version: chromedriver version (str or number), or None for latest
    :param bit: '32'/'64' (str or int), or None for the host's word size
    :return: the directory the binary was extracted to
    :raises RuntimeError: for an unknown version/bit or an unknown platform
    """
    url = 'https://chromedriver.storage.googleapis.com/'
    resp = requests.get(url)
    tree = ElementTree.fromstring(resp.content)
    ns = _get_xml_ns(tree.tag)
    if version:
        version = str(version)
    if bit:
        bit = str(bit)
    else:
        bit = '64' if IS_64_BIT else '32'
    # Index the bucket listing: {version: {platform: {bit: url}}}.
    versions = {}
    for elem in tree.findall('%sContents' % ns):
        key = elem.find('%sKey' % ns)
        m = re.match(r'^([\d.]+?)/chromedriver_(linux|mac|win)(32|64)', key.text)
        if m:
            v = m.group(1)  # version
            p = m.group(2)  # platform
            b = m.group(3)  # bit
            versions.setdefault(v, {}).setdefault(p, {})[b] = url + key.text
    if version and version not in versions:
        raise RuntimeError('Chromedriver %s is not a valid version' % version)
    if IS_WINDOWS:
        p = 'win'
    elif IS_LINUX:
        p = 'linux'
    elif IS_MAC:
        p = 'mac'
    else:
        raise RuntimeError('Unable to detect current platform')
    if version:
        # `bit` is always a string at this point, so the old
        # `if bit is None: versions[version][p][bit]` branch -- which
        # indexed the dict with None -- was dead/broken. Validate instead.
        if bit not in versions[version][p]:
            raise RuntimeError('Invalid bit value %s' % bit)
        download_url = versions[version][p][bit]
    else:
        # get latest version
        numbers = list(versions.keys())
        numbers.sort(key=StrictVersion, reverse=True)
        version = numbers[0]
        download_url = versions[version][p][bit]
    destination_dir = os.path.join(REPO_DIR, 'chromedriver',
                                   '%s-%s%s' % (version, p, bit,))
    if os.path.isdir(destination_dir):
        # Already cached.
        return destination_dir
    # download and unzip to the repo directory
    with download_file(download_url) as name:
        _mkdirp(destination_dir)
        with zipfile.ZipFile(name, 'r') as archive:
            archive.extractall(destination_dir)
        for filename in glob.iglob(destination_dir + '/*'):
            # Executable permissions: 0o755, not decimal 777 (mode 0o1411).
            os.chmod(filename, 0o755)
    return destination_dir
def get_binary(name, arch=None, version=None):
    """
    Get the driver binary.
    This will check the cache location to see if it has already been downloaded
    and return its path. If it is not in the cache then it will be downloaded.
    :param name: the binary name chromedriver,
    :param arch:
    :param version:
    :return:
    """
    # NOTE(review): unimplemented stub -- the body is only this docstring,
    # so it currently returns None for every input. Callers should use the
    # Driver classes or download_chromedriver() until this is written.
80df37b53bbc27178cffa48ac6b0ca5dba92823b | 22,587 | py | Python | contrib/devtools/copyright_header.py | wolfoxonly/fpc | bf372369fbbc271330527b5fb83d6285a865332c | [
"MIT"
] | null | null | null | contrib/devtools/copyright_header.py | wolfoxonly/fpc | bf372369fbbc271330527b5fb83d6285a865332c | [
"MIT"
] | null | null | null | contrib/devtools/copyright_header.py | wolfoxonly/fpc | bf372369fbbc271330527b5fb83d6285a865332c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Flashpaychain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# libsecp256k1:
'src/secp256k1/include/secp256k1.h',
'src/secp256k1/include/secp256k1_ecdh.h',
'src/secp256k1/include/secp256k1_recovery.h',
'src/secp256k1/include/secp256k1_schnorr.h',
'src/secp256k1/src/java/org_Flashpaychain_NativeSecp256k1.c',
'src/secp256k1/src/java/org_Flashpaychain_NativeSecp256k1.h',
'src/secp256k1/src/java/org_Flashpaychain_Secp256k1Context.c',
'src/secp256k1/src/java/org_Flashpaychain_Secp256k1Context.h',
# univalue:
'src/univalue/test/object.cpp',
'src/univalue/lib/univalue_escapes.h',
# auto generated:
'src/qt/Flashpaychainstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/tinyformat.h',
'src/leveldb/util/env_win.cc',
'src/crypto/ctaes/bench.c',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
    """Return True when *filename* matches INCLUDE and is not EXCLUDEd."""
    if EXCLUDE_COMPILED.match(filename) is not None:
        return False
    return INCLUDE_COMPILED.match(filename) is not None
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files'
def call_git_ls():
    """Return the repository's tracked filenames via ``git ls-files``."""
    output = subprocess.check_output(GIT_LS_CMD.split(' ')).decode("utf-8")
    return [line for line in output.split('\n') if line != '']
def get_filenames_to_examine():
    """Return the sorted tracked filenames that pass the fnmatch filters."""
    return sorted(name for name in call_git_ls() if applies_to_file(name))
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
COPYRIGHT_WITH_C = 'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
def compile_copyright_regex(copyright_style, year_style, name):
    """Compile a regex matching '<copyright> <year(s)> <holder name>'."""
    pattern = '%s %s %s' % (copyright_style, year_style, name)
    return re.compile(pattern)
EXPECTED_HOLDER_NAMES = [
"Satoshi Nakamoto\n",
"The Flashpaychain Core developers\n",
"The Flashpaychain Core developers \n",
"Flashpaychain Core Developers\n",
"the Flashpaychain Core developers\n",
"The Flashpaychain developers\n",
"The LevelDB Authors\. All rights reserved\.\n",
"BitPay Inc\.\n",
"BitPay, Inc\.\n",
"University of Illinois at Urbana-Champaign\.\n",
"MarcoFalke\n",
"Pieter Wuille\n",
"Pieter Wuille +\*\n",
"Pieter Wuille, Gregory Maxwell +\*\n",
"Pieter Wuille, Andrew Poelstra +\*\n",
"Andrew Poelstra +\*\n",
"Wladimir J. van der Laan\n",
"Jeff Garzik\n",
"Diederik Huys, Pieter Wuille +\*\n",
"Thomas Daede, Cory Fields +\*\n",
"Jan-Klaas Kollhof\n",
"Sam Rushing\n",
"ArtForz -- public domain half-a-node\n",
]
# Pre-compile one regex per expected holder for each of the three notice
# styles, keyed by the holder-name pattern string.
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
    DOMINANT_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
    YEAR_LIST_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
    WITHOUT_C_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
                                holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
    """Count every copyright-looking notice in *contents*, for any holder."""
    matches = ANY_COPYRIGHT_COMPILED.findall(contents)
    return len(matches)
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
    """True if *contents* has a '(c) <year-range>' notice for *holder_name*."""
    return DOMINANT_STYLE_COMPILED[holder_name].search(contents) is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
    """True if *contents* has a '(c) <year1>, <year2>, ...' notice for *holder_name*."""
    return YEAR_LIST_STYLE_COMPILED[holder_name].search(contents) is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
    """True if *contents* has a 'Copyright <years>' notice (no '(c)') for *holder_name*."""
    return WITHOUT_C_STYLE_COMPILED[holder_name].search(contents) is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    """Return the full text of *filename*.

    Uses a context manager so the handle is closed deterministically (the
    previous bare ``open(...).read()`` leaked it until GC).
    """
    with open(os.path.abspath(filename), 'r') as f:
        return f.read()
def gather_file_info(filename):
    """Classify every copyright notice in *filename*.

    :param filename: path of the source file to scan
    :return: dict with the file's contents, the total notice count
        ('all_copyrights'), how many of them matched a known
        style+holder combination ('classified_copyrights'), and one
        holder->bool map per notice style.
    """
    info = {}
    info['filename'] = filename
    c = read_file(filename)
    info['contents'] = c
    info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
    info['classified_copyrights'] = 0
    info['dominant_style'] = {}
    info['year_list_style'] = {}
    info['without_c_style'] = {}
    for holder_name in EXPECTED_HOLDER_NAMES:
        has_dominant_style = (
            file_has_dominant_style_copyright_for_holder(c, holder_name))
        has_year_list_style = (
            file_has_year_list_style_copyright_for_holder(c, holder_name))
        has_without_c_style = (
            file_has_without_c_style_copyright_for_holder(c, holder_name))
        info['dominant_style'][holder_name] = has_dominant_style
        info['year_list_style'][holder_name] = has_year_list_style
        info['without_c_style'][holder_name] = has_without_c_style
        # A holder counts as classified if it matched in any of the styles.
        if has_dominant_style or has_year_list_style or has_without_c_style:
            info['classified_copyrights'] = info['classified_copyrights'] + 1
    return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-'.join(['' for _ in range(80)])
def print_filenames(filenames, verbose):
    """Print one tab-indented filename per line, but only in verbose mode."""
    if verbose:
        for filename in filenames:
            print("\t%s" % filename)
def print_report(file_infos, verbose):
    """Print the full copyright report for *file_infos* to stdout.

    Sections: total examined files, histogram of notice counts per file,
    one subsection per notice style listing per-holder counts, and finally
    the files containing notices no known holder/style matched.

    :param file_infos: list of dicts produced by gather_file_info()
    :param verbose: when True, also list the filenames in each category
    """
    print(SEPARATOR)
    examined = [i['filename'] for i in file_infos]
    print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
          len(examined))
    print_filenames(examined, verbose)
    print(SEPARATOR)
    print('')
    # Histogram of how many copyright notices each file contains.
    zero_copyrights = [i['filename'] for i in file_infos if
                       i['all_copyrights'] == 0]
    print("%4d with zero copyrights" % len(zero_copyrights))
    print_filenames(zero_copyrights, verbose)
    one_copyright = [i['filename'] for i in file_infos if
                     i['all_copyrights'] == 1]
    print("%4d with one copyright" % len(one_copyright))
    print_filenames(one_copyright, verbose)
    two_copyrights = [i['filename'] for i in file_infos if
                      i['all_copyrights'] == 2]
    print("%4d with two copyrights" % len(two_copyrights))
    print_filenames(two_copyrights, verbose)
    three_copyrights = [i['filename'] for i in file_infos if
                        i['all_copyrights'] == 3]
    print("%4d with three copyrights" % len(three_copyrights))
    print_filenames(three_copyrights, verbose)
    four_or_more_copyrights = [i['filename'] for i in file_infos if
                               i['all_copyrights'] >= 4]
    print("%4d with four or more copyrights" % len(four_or_more_copyrights))
    print_filenames(four_or_more_copyrights, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
          '"<year>" or "<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        dominant_style = [i['filename'] for i in file_infos if
                          i['dominant_style'][holder_name]]
        if len(dominant_style) > 0:
            print("%4d with '%s'" % (len(dominant_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(dominant_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
          '"<year1>, <year2>, ...":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        year_list_style = [i['filename'] for i in file_infos if
                           i['year_list_style'][holder_name]]
        if len(year_list_style) > 0:
            print("%4d with '%s'" % (len(year_list_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(year_list_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
          '"<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        without_c_style = [i['filename'] for i in file_infos if
                           i['without_c_style'][holder_name]]
        if len(without_c_style) > 0:
            print("%4d with '%s'" % (len(without_c_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(without_c_style, verbose)
    print('')
    print(SEPARATOR)
    # Files with more notices than we could attribute to known holders.
    unclassified_copyrights = [i['filename'] for i in file_infos if
                               i['classified_copyrights'] < i['all_copyrights']]
    print("%d with unexpected copyright holder names" %
          len(unclassified_copyrights))
    print_filenames(unclassified_copyrights, verbose)
    print(SEPARATOR)
def exec_report(base_directory, verbose):
    """Run the copyright report from *base_directory*, restoring the cwd."""
    previous_cwd = os.getcwd()
    os.chdir(base_directory)
    infos = [gather_file_info(name) for name in get_filenames_to_examine()]
    print_report(infos, verbose)
    os.chdir(previous_cwd)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a Flashpaychain source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
    """Handle the 'report' subcommand.

    Expected argv layout: ``[script, 'report', base_directory, ('verbose')?]``.

    :param argv: full sys.argv-style argument list
    :raises SystemExit: on a missing base directory or an unknown optional
        argument (with a descriptive message)
    """
    if len(argv) == 2:
        sys.exit(REPORT_USAGE)
    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad <base_directory>: %s" % base_directory)
    if len(argv) == 3:
        verbose = False
    elif argv[3] == 'verbose':
        verbose = True
    else:
        # Report the offending optional argument; the previous version
        # printed argv[2] (the base directory) here by mistake.
        sys.exit("*** unknown argument: %s" % argv[3])
    exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
    """Return the ``git log`` author-date lines (ISO 8601) for *filename*."""
    command = (GIT_LOG_CMD % filename).split(' ')
    return subprocess.check_output(command).decode("utf-8").split('\n')
def get_git_change_years(filename):
    """Return the years (as strings) of every commit touching *filename*."""
    git_log_lines = call_git_log(filename)
    if len(git_log_lines) == 0:
        # NOTE(review): str.split always yields at least one element, so
        # this fallback looks unreachable; it also returns an *int* year
        # while the normal path returns strings -- confirm before relying
        # on it.
        return [datetime.date.today().year]
    # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
    return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
    """Return the latest year in which git recorded a change to *filename*."""
    years = get_git_change_years(filename)
    return max(years)
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
    """Return the lines of *filename* (with newlines kept).

    Uses ``with`` so the handle is closed even if readlines() raises; the
    previous open/close pair leaked the handle on the error path.
    """
    with open(os.path.abspath(filename), 'r') as f:
        return f.readlines()
def write_file_lines(filename, file_lines):
    """Write *file_lines* (already newline-terminated) to *filename*.

    Uses ``with`` so the handle is closed/flushed even if write() raises;
    the previous open/close pair leaked the handle on the error path.
    """
    with open(os.path.abspath(filename), 'w') as f:
        f.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################
COPYRIGHT = 'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Flashpaychain Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
    """Return (index, line) of the first updatable core-dev copyright line,
    or (None, None) when no line matches."""
    for index, line in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(line) is not None:
            return index, line
    return None, None
def parse_year_range(year_range):
    """Split 'YYYY' or 'YYYY-ZZZZ' into a (start, end) pair of strings."""
    parts = year_range.split('-')
    start = parts[0]
    if len(parts) == 1:
        return start, start
    return start, parts[1]
def year_range_to_str(start_year, end_year):
    """Render a year span as 'YYYY' (single year) or 'YYYY-ZZZZ'."""
    return start_year if start_year == end_year else "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
    """Return *line* with its copyright end-year bumped to
    *last_git_change_year*, or *line* unchanged if already up to date.

    :param line: a line already known to match UPDATEABLE_LINE_COMPILED
    :param last_git_change_year: target end year, as a string
    """
    copyright_splitter = 'Copyright (c) '
    copyright_split = line.split(copyright_splitter)
    # Preserve characters on line that are ahead of the start of the copyright
    # notice - they are part of the comment block and vary from file-to-file.
    before_copyright = copyright_split[0]
    after_copyright = copyright_split[1]
    space_split = after_copyright.split(' ')
    year_range = space_split[0]
    start_year, end_year = parse_year_range(year_range)
    if end_year == last_git_change_year:
        return line
    # Rejoining space_split[1:] restores the holder text (and trailing
    # newline) exactly as it appeared after the year range.
    return (before_copyright + copyright_splitter +
            year_range_to_str(start_year, last_git_change_year) + ' ' +
            ' '.join(space_split[1:]))
def update_updatable_copyright(filename):
    """Rewrite *filename* in place so its core-dev copyright end-year
    matches the file's most recent git change year.

    Prints a status line describing what (if anything) was done.
    """
    file_lines = read_file_lines(filename)
    index, line = get_updatable_copyright_line(file_lines)
    if not line:
        print_file_action_message(filename, "No updatable copyright.")
        return
    last_git_change_year = get_most_recent_git_change_year(filename)
    new_line = create_updated_copyright_line(line, last_git_change_year)
    if line == new_line:
        print_file_action_message(filename, "Copyright up-to-date.")
        return
    # Only the single matched line changes; everything else is preserved.
    file_lines[index] = new_line
    write_file_lines(filename, file_lines)
    print_file_action_message(filename,
                              "Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
    """Update copyright years across the repo rooted at *base_directory*,
    restoring the previous working directory afterwards."""
    previous_cwd = os.getcwd()
    os.chdir(base_directory)
    for filename in get_filenames_to_examine():
        update_updatable_copyright(filename)
    os.chdir(previous_cwd)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Flashpaychain Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Flashpaychain Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Flashpaychain Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Flashpaychain Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Flashpaychain Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a Flashpaychain source code repository.
"""
def print_file_action_message(filename, action):
    """Print a status line: the filename left-padded to 52 columns, then
    the action taken."""
    print(f"{filename:<52} {action}")
def update_cmd(argv):
    """Handle the 'update' subcommand.

    Expected argv layout: ``[script, 'update', base_directory]``; exits
    with a usage or error message on bad arguments.
    """
    if len(argv) != 3:
        sys.exit(UPDATE_USAGE)
    base_directory = argv[2]
    if os.path.exists(base_directory):
        exec_update_header_year(base_directory)
    else:
        sys.exit("*** bad base_directory: %s" % base_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
    """Instantiate a header template (its first body line contains a '%s'
    for the year range) into a list of newline-terminated lines."""
    lines = header.split('\n')[1:-1]
    first = lines[0] % year_range_to_str(start_year, end_year)
    return [first + '\n'] + [line + '\n' for line in lines[1:]]
CPP_HEADER = '''
// Copyright (c) %s The Flashpaychain Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
    """C++-style header lines, reversed for repeated insertion at index 0."""
    lines = get_header_lines(CPP_HEADER, start_year, end_year)
    return reversed(lines)
PYTHON_HEADER = '''
# Copyright (c) %s The Flashpaychain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_python_header_lines_to_insert(start_year, end_year):
    """Python-style header lines, reversed for repeated insertion at a
    fixed index."""
    lines = get_header_lines(PYTHON_HEADER, start_year, end_year)
    return reversed(lines)
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
    """Return the (earliest, latest) git change years for *filename*."""
    years = get_git_change_years(filename)
    return min(years), max(years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
    """True if *file_lines* already contains an updatable core-dev
    copyright notice.

    Uses ``is not None`` rather than the previous ``!= None`` comparison
    (identity, not equality, is the correct test for None).
    """
    index, _ = get_updatable_copyright_line(file_lines)
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
    """True when the first line is a shebang with content after '#!'."""
    return (len(file_lines) >= 1 and
            len(file_lines[0]) > 2 and
            file_lines[0][:2] == '#!')
def insert_python_header(filename, file_lines, start_year, end_year):
    """Insert the Python-style header, keeping any '#!' line at the very top."""
    insert_idx = 1 if file_has_hashbang(file_lines) else 0
    # Lines arrive bottom-up, so repeated insertion at a fixed index
    # yields the header in its natural order.
    for line in get_python_header_lines_to_insert(start_year, end_year):
        file_lines.insert(insert_idx, line)
    write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
    """Insert the C++-style header at the very top of the file."""
    # Lines arrive bottom-up, so inserting each at index 0 restores order.
    for line in get_cpp_header_lines_to_insert(start_year, end_year):
        file_lines.insert(0, line)
    write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
    """Insert a copyright header into filename, refusing duplicates.

    Exits the process when a core copyright is already present.
    """
    file_lines = read_file_lines(filename)
    if file_already_has_core_copyright(file_lines):
        sys.exit('*** %s already has a copyright by The Flashpaychain Core developers' % filename)
    start_year, end_year = get_git_change_year_range(filename)
    inserter = insert_python_header if style == 'python' else insert_cpp_header
    inserter(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Flashpaychain Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Flashpaychain Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the Flashpaychain repository.
"""
def insert_cmd(argv):
    """Validate 'insert' subcommand arguments and dispatch to exec_insert_header."""
    if len(argv) != 3:
        sys.exit(INSERT_USAGE)
    filename = argv[2]
    if not os.path.isfile(filename):
        sys.exit("*** bad filename: %s" % filename)
    _, extension = os.path.splitext(filename)
    if extension not in ('.h', '.cpp', '.cc', '.c', '.py'):
        sys.exit("*** cannot insert for file extension %s" % extension)
    style = 'python' if extension == '.py' else 'cpp'
    exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Flashpaychain
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
# Known subcommand names; anything else prints the usage text and exits.
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
    # No subcommand given at all.
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    if subcommand not in SUBCOMMANDS:
        sys.exit(USAGE)
    # Each *_cmd handler re-validates the remaining argv itself.
    if subcommand == 'report':
        report_cmd(sys.argv)
    elif subcommand == 'update':
        update_cmd(sys.argv)
    elif subcommand == 'insert':
        insert_cmd(sys.argv)
| 36.846656 | 92 | 0.603312 |
d39b8018ae61f46c53654c0ead6845b4bdc12b93 | 2,503 | py | Python | tools/generate-devstack-plugins-list.py | adduarte/devstack | ae7e10f5ee967efc7011e1c3f30dc96c53081580 | [
"Apache-2.0"
] | 1 | 2019-11-28T00:10:55.000Z | 2019-11-28T00:10:55.000Z | tools/generate-devstack-plugins-list.py | mykiep/devstack | 595759c764eebbaffeb120f178dcd9069fbba070 | [
"Apache-2.0"
] | null | null | null | tools/generate-devstack-plugins-list.py | mykiep/devstack | 595759c764eebbaffeb120f178dcd9069fbba070 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is intended to be run as part of a periodic proposal bot
# job in OpenStack infrastructure.
#
# In order to function correctly, the environment in which the
# script runs must have
# * network access to the review.opendev.org Gerrit API
# working directory
# * network access to https://opendev.org/
import functools
import logging
import json
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
logging.basicConfig(level=logging.DEBUG)
url = 'https://review.opendev.org/projects/'
# This is what a project looks like
'''
"openstack-attic/akanda": {
"id": "openstack-attic%2Fakanda",
"state": "READ_ONLY"
},
'''
def is_in_wanted_namespace(proj):
    """Keep every project except the retired 'stackforge' namespaces."""
    retired_prefixes = ('stackforge/', 'stackforge-attic/')
    return not proj.startswith(retired_prefixes)
# Check if this project has a plugin file
def has_devstack_plugin(session, proj):
    """Return True when proj serves devstack/plugin.sh on its master branch.

    Debian packaging repos are excluded outright, without a network probe.
    """
    if "openstack/deb-" in proj:
        return False
    plugin_url = "https://opendev.org/%s/raw/branch/master/devstack/plugin.sh" % proj
    response = session.get(plugin_url)
    return response.status_code == 200
logging.debug("Getting project list from %s" % url)
r = requests.get(url)
# r.text[4:] drops the 4-character anti-XSSI prefix ()]}') that Gerrit
# prepends to every JSON response -- see the Gerrit REST API docs.
projects = sorted(filter(is_in_wanted_namespace, json.loads(r.text[4:])))
logging.debug("Found %d projects" % len(projects))
s = requests.Session()
# sometimes gitea gives us a 500 error; retry sanely
# https://stackoverflow.com/a/35636367
retries = Retry(total=3, backoff_factor=1,
                status_forcelist=[ 500 ])
s.mount('https://', HTTPAdapter(max_retries=retries))
# Lazy filter: each yielded project triggers one HTTP probe inside
# has_devstack_plugin, so matches print as they are discovered.
found_plugins = filter(functools.partial(has_devstack_plugin, s), projects)
for project in found_plugins:
    print(project)
| 31.683544 | 89 | 0.728726 |
0528d89518a9bb1116801e4620cbae112362746b | 467 | py | Python | pubtrack/wsgi.py | the16thpythonist/pubtrack | 5d44914ed6aafe01dc7b234e9f4e8d12e45dfb50 | [
"MIT"
] | null | null | null | pubtrack/wsgi.py | the16thpythonist/pubtrack | 5d44914ed6aafe01dc7b234e9f4e8d12e45dfb50 | [
"MIT"
] | 4 | 2020-07-28T10:04:57.000Z | 2021-09-22T19:03:17.000Z | pubtrack/wsgi.py | the16thpythonist/pubtrack | 5d44914ed6aafe01dc7b234e9f4e8d12e45dfb50 | [
"MIT"
] | null | null | null | """
WSGI config for pubtrack project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/gunicorn/
"""
import os

# Select the settings module and django-configurations class *before*
# importing anything Django-related; setdefault lets the deployment
# environment override both.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pubtrack.config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Production")

# Deliberately imported after the environment setup above (hence the noqa).
from configurations.wsgi import get_wsgi_application  # noqa

# Module-level WSGI callable that gunicorn/uwsgi look up by name.
application = get_wsgi_application()
| 33.357143 | 78 | 0.796574 |
46a74f3b9526f3910c3a8e83f21570b4a6798352 | 239 | py | Python | data_science_toolbox/gists/profile_data.py | safurrier/data_science_utils | 842b025ea3197e8a9946401257b2fa22ef1bf82d | [
"MIT"
] | 3 | 2019-02-14T21:16:15.000Z | 2020-05-05T20:20:42.000Z | data_science_toolbox/gists/profile_data.py | safurrier/data_science_utils | 842b025ea3197e8a9946401257b2fa22ef1bf82d | [
"MIT"
] | null | null | null | data_science_toolbox/gists/profile_data.py | safurrier/data_science_utils | 842b025ea3197e8a9946401257b2fa22ef1bf82d | [
"MIT"
] | 1 | 2019-07-11T02:06:49.000Z | 2019-07-11T02:06:49.000Z | import pathlib
import pandas_profiling
output_profile_path = pathlib.Path("reports/data-profiling/01-01-initial_data_profiling.html")
profile = pandas_profiling.ProfileReport(data)
profile.to_file(outputfile=output_profile_path.as_posix()) | 47.8 | 94 | 0.861925 |
c71a163fb25ad1a3b45cde8aabeace218d509426 | 347 | py | Python | Python/Unsorted/122a.py | LittleEndu/Codeforces | 82c49b10702c58bc5ce062801d740a2f5f600062 | [
"MIT"
] | null | null | null | Python/Unsorted/122a.py | LittleEndu/Codeforces | 82c49b10702c58bc5ce062801d740a2f5f600062 | [
"MIT"
] | null | null | null | Python/Unsorted/122a.py | LittleEndu/Codeforces | 82c49b10702c58bc5ce062801d740a2f5f600062 | [
"MIT"
] | null | null | null | # Smallest:
# n=int(input());print('YNEOS'[all(set(str(i))-set('47')or n%i for i in range(n+1))::2])
# No idea...
# Also not going to brute force it against test cases like others seemed to have. My solution would work for any number
# Codeforces 122A: n is "almost lucky" if it is divisible by some lucky
# number, i.e. a number whose decimal digits are only 4s and 7s.
# d lists every lucky number up to 777; input n is presumably bounded by
# 1000 per the problem statement -- TODO confirm against the task limits.
d=[4,7,44,47,74,77,444,447,474,477,744,747,774,777]
a=int(input())
# Index a bool into ['NO','YES']: True selects 'YES'.
print(['NO','YES'][any([a%i==0 for i in d])])
3309f4e6f75e02a6095d2576239860c9e4523a5b | 300 | py | Python | 400-499/414.py | linyk9/leetcode | eaaadead32cc9d3884543f4ed832bb3cb5aa68e6 | [
"MIT"
] | null | null | null | 400-499/414.py | linyk9/leetcode | eaaadead32cc9d3884543f4ed832bb3cb5aa68e6 | [
"MIT"
] | null | null | null | 400-499/414.py | linyk9/leetcode | eaaadead32cc9d3884543f4ed832bb3cb5aa68e6 | [
"MIT"
] | null | null | null | class Solution:
def thirdMax(self, nums: List[int]) -> int:
nums = list(set([-x for x in nums]))
if len(nums) < 3:
return -min(nums)
heapq.heapify(nums)
# print(nums)
for _ in range(3):
ans = -heapq.heappop(nums)
return ans
| 27.272727 | 47 | 0.506667 |
0842a59885e80171480b3f16ba9da0e5af6cf39e | 3,327 | py | Python | benchmark/startCirq873.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq873.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq873.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=38
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit) -> cirq.Circuit:
    """Build the benchmark circuit over the 5 qubits in input_qubit.

    Auto-generated gate sequence: the exact gate order (tagged by the
    ``# number=`` comments) is significant and must not be reordered.
    Note: the parameter ``n`` is never used in the body.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=3
    c.append(cirq.H.on(input_qubit[1])) # number=4
    c.append(cirq.H.on(input_qubit[2])) # number=5
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=35
    c.append(cirq.X.on(input_qubit[2])) # number=36
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=37
    c.append(cirq.H.on(input_qubit[3]))  # number=6
    c.append(cirq.H.on(input_qubit[4]))  # number=21
    # Two identical rounds of the generated gate pattern.
    for i in range(2):
        c.append(cirq.H.on(input_qubit[0])) # number=1
        c.append(cirq.H.on(input_qubit[1])) # number=2
        c.append(cirq.Y.on(input_qubit[1])) # number=31
        c.append(cirq.H.on(input_qubit[2])) # number=7
        c.append(cirq.H.on(input_qubit[3])) # number=8
        c.append(cirq.H.on(input_qubit[0]))  # number=17
        c.append(cirq.H.on(input_qubit[1])) # number=18
        c.append(cirq.H.on(input_qubit[2])) # number=19
        c.append(cirq.rx(1.7404423300887455).on(input_qubit[1])) # number=32
        c.append(cirq.Z.on(input_qubit[1])) # number=33
        c.append(cirq.H.on(input_qubit[3]))  # number=20
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=28
        c.append(cirq.X.on(input_qubit[0])) # number=29
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=30
        c.append(cirq.X.on(input_qubit[1])) # number=10
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=22
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=25
        c.append(cirq.X.on(input_qubit[2])) # number=26
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=27
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=24
        c.append(cirq.X.on(input_qubit[3])) # number=12
        c.append(cirq.X.on(input_qubit[0])) # number=13
        c.append(cirq.X.on(input_qubit[1])) # number=14
        c.append(cirq.X.on(input_qubit[2])) # number=15
        c.append(cirq.X.on(input_qubit[3])) # number=16
    # circuit end
    # Measure all qubits into a single multi-bit 'result' key.
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of measurement bit values as a digit string."""
    return "".join(map(str, map(int, bits)))
if __name__ == '__main__':
    qubit_count = 5
    # One column of grid qubits: (0,0) .. (4,0).
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile the circuit for the Sycamore sqrt-iswap gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    # Fold each repetition's bits into a '0'/'1' string for the histogram.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # NOTE(review): file handle opened without a context manager; close()
    # is skipped if a print below raises.
    writefile = open("../data/startCirq873.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
6b73e99235cd9915ea7dfcd0aaa870e458a304a7 | 34,139 | py | Python | venv/lib/python3.7/site-packages/mysqlx/protocol.py | sa77irak/flask_tuto_hf | 4994e6053e9784702c4c70cf60b376750ec91ae9 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/mysqlx/protocol.py | sa77irak/flask_tuto_hf | 4994e6053e9784702c4c70cf60b376750ec91ae9 | [
"MIT"
] | 10 | 2020-02-12T01:16:17.000Z | 2022-03-12T00:51:05.000Z | backend/Lib/site-packages/mysqlx/protocol.py | koyota79/weekly-report | a9f518965d9f51e81c13711a8b0bfda336eae2a0 | [
"bzip2-1.0.6"
] | 1 | 2020-07-20T17:25:48.000Z | 2020-07-20T17:25:48.000Z | # Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementation of the X protocol for MySQL servers."""
import struct
from .compat import STRING_TYPES, INT_TYPES
from .errors import (InterfaceError, NotSupportedError, OperationalError,
ProgrammingError)
from .expr import (ExprParser, build_expr, build_scalar, build_bool_scalar,
build_int_scalar, build_unsigned_int_scalar)
from .helpers import encode_to_bytes, get_item_or_attr
from .result import Column
from .protobuf import (CRUD_PREPARE_MAPPING, SERVER_MESSAGES,
PROTOBUF_REPEATED_TYPES, Message, mysqlxpb_enum)
class MessageReaderWriter(object):
    """Reads and writes length-prefixed X Protocol messages on a stream.

    Wire format: a 5-byte little-endian header (``uint32`` total length,
    ``uint8`` message type) followed by ``length - 1`` payload bytes.

    Fixes over the original: the plugin-missing error message rendered as
    "...or protocolmismatch" (missing space between adjacent string
    literals), and ``_read_message`` recursed once per skipped message,
    which could exhaust the stack on a long run of empty/unparseable
    notices; it now loops.

    Args:
        socket_stream (mysqlx.connection.SocketStream): `SocketStream` object.
    """

    def __init__(self, socket_stream):
        self._stream = socket_stream
        # Single-slot pushback buffer used by push_message()/read_message().
        self._msg = None

    def _read_message(self):
        """Read the next parseable message from the stream.

        Empty notice frames (type 11) and payloads that ``Message`` cannot
        parse are skipped, and reading continues with the next message.

        Raises:
            :class:`mysqlx.ProgrammingError`: If the connected server does
                                              not have the MySQL X protocol
                                              plugin enabled.
            ValueError: If the server sends an unknown message type.

        Returns:
            mysqlx.protobuf.Message: MySQL X Protobuf Message.
        """
        while True:
            hdr = self._stream.read(5)
            msg_len, msg_type = struct.unpack("<LB", hdr)
            if msg_type == 10:
                raise ProgrammingError("The connected server does not have "
                                       "the MySQL X protocol plugin enabled "
                                       "or protocol mismatch")
            payload = self._stream.read(msg_len - 1)
            msg_type_name = SERVER_MESSAGES.get(msg_type)
            if not msg_type_name:
                raise ValueError("Unknown msg_type: {0}".format(msg_type))
            # Do not parse empty notices, Message requires a type in payload.
            if msg_type == 11 and payload == b"":
                continue
            try:
                return Message.from_server_message(msg_type, payload)
            except RuntimeError:
                # Unparseable payload: skip it and keep reading.
                continue

    def read_message(self):
        """Return the pushed-back message if any, else read from the stream.

        Returns:
            mysqlx.protobuf.Message: MySQL X Protobuf Message.
        """
        if self._msg is None:
            return self._read_message()
        msg, self._msg = self._msg, None
        return msg

    def push_message(self, msg):
        """Push one message back so the next read_message() returns it.

        Args:
            msg (mysqlx.protobuf.Message): MySQL X Protobuf Message.

        Raises:
            :class:`mysqlx.OperationalError`: If message push slot is full.
        """
        if self._msg is not None:
            raise OperationalError("Message push slot is full")
        self._msg = msg

    def write_message(self, msg_id, msg):
        """Serialize msg and send it prefixed with the 5-byte header.

        Args:
            msg_id (int): The message ID.
            msg (mysqlx.protobuf.Message): MySQL X Protobuf Message.
        """
        msg_str = encode_to_bytes(msg.serialize_to_string())
        header = struct.pack("<LB", len(msg_str) + 1, msg_id)
        self._stream.sendall(b"".join([header, msg_str]))
class Protocol(object):
"""Implements the MySQL X Protocol.
Args:
read_writer (mysqlx.protocol.MessageReaderWriter): A Message \
Reader/Writer object.
"""
def __init__(self, reader_writer):
self._reader = reader_writer
self._writer = reader_writer
self._message = None
    def _apply_filter(self, msg, stmt):
        """Copy the statement's optional filter clauses into msg.

        Only clauses the statement actually carries are set; 'criteria'
        and 'grouping_criteria' are single fields while 'order' and
        'grouping' are repeated fields (hence extend).

        Args:
            msg (mysqlx.protobuf.Message): The MySQL X Protobuf Message.
            stmt (Statement): A `Statement` based type object.
        """
        if stmt.has_where:
            msg["criteria"] = stmt.get_where_expr()
        if stmt.has_sort:
            msg["order"].extend(stmt.get_sort_expr())
        if stmt.has_group_by:
            msg["grouping"].extend(stmt.get_grouping())
        if stmt.has_having:
            msg["grouping_criteria"] = stmt.get_having()
    def _create_any(self, arg):
        """Wrap a Python value into a Mysqlx.Datatypes.Any message.

        Supported inputs: strings, bools, ints, a 2-tuple (key, value)
        becoming a one-field object, and dicts / sequences of dicts
        becoming an object array.

        Args:
            arg (object): Arbitrary object.

        Returns:
            mysqlx.protobuf.Message: MySQL X Protobuf Message, or None when
            the type is unsupported (silent fallthrough).
        """
        if isinstance(arg, STRING_TYPES):
            value = Message("Mysqlx.Datatypes.Scalar.String", value=arg)
            scalar = Message("Mysqlx.Datatypes.Scalar", type=8, v_string=value)
            return Message("Mysqlx.Datatypes.Any", type=1, scalar=scalar)
        elif isinstance(arg, bool):
            # bool must be tested before INT_TYPES: bool subclasses int.
            return Message("Mysqlx.Datatypes.Any", type=1,
                           scalar=build_bool_scalar(arg))
        elif isinstance(arg, INT_TYPES):
            if arg < 0:
                return Message("Mysqlx.Datatypes.Any", type=1,
                               scalar=build_int_scalar(arg))
            return Message("Mysqlx.Datatypes.Any", type=1,
                           scalar=build_unsigned_int_scalar(arg))
        elif isinstance(arg, tuple) and len(arg) == 2:
            arg_key, arg_value = arg
            obj_fld = Message("Mysqlx.Datatypes.Object.ObjectField",
                              key=arg_key, value=self._create_any(arg_value))
            obj = Message("Mysqlx.Datatypes.Object",
                          fld=[obj_fld.get_message()])
            return Message("Mysqlx.Datatypes.Any", type=2, obj=obj)
        elif isinstance(arg, dict) or (isinstance(arg, (list, tuple)) and
                                       isinstance(arg[0], dict)):
            array_values = []
            for items in arg:
                obj_flds = []
                for key, value in items.items():
                    # Array can only handle Any types, Mysqlx.Datatypes.Any.obj
                    obj_fld = Message("Mysqlx.Datatypes.Object.ObjectField",
                                      key=key, value=self._create_any(value))
                    obj_flds.append(obj_fld.get_message())
                msg_obj = Message("Mysqlx.Datatypes.Object", fld=obj_flds)
                msg_any = Message("Mysqlx.Datatypes.Any", type=2, obj=msg_obj)
                array_values.append(msg_any.get_message())
            msg = Message("Mysqlx.Datatypes.Array")
            msg["value"] = array_values
            return Message("Mysqlx.Datatypes.Any", type=3, array=msg)
        # NOTE(review): unsupported types fall through to None silently.
        return None
def _get_binding_args(self, stmt, is_scalar=True):
"""Returns the binding any/scalar.
Args:
stmt (Statement): A `Statement` based type object.
is_scalar (bool): `True` to return scalar values.
Raises:
:class:`mysqlx.ProgrammingError`: If unable to find placeholder for
parameter.
Returns:
list: A list of ``Any`` or ``Scalar`` objects.
"""
build_value = lambda value: build_scalar(value).get_message() \
if is_scalar else self._create_any(value).get_message()
bindings = stmt.get_bindings()
binding_map = stmt.get_binding_map()
# If binding_map is None it's a SqlStatement object
if binding_map is None:
return [build_value(value) for value in bindings]
count = len(binding_map)
args = count * [None]
if count != len(bindings):
raise ProgrammingError("The number of bind parameters and "
"placeholders do not match")
for name, value in bindings.items():
if name not in binding_map:
raise ProgrammingError("Unable to find placeholder for "
"parameter: {0}".format(name))
pos = binding_map[name]
args[pos] = build_value(value)
return args
    def _process_frame(self, msg, result):
        """Decode a notice frame and record its contents on result.

        Frame types: 1 = warning, 2 = session variable changed (decoded but
        discarded), 3 = session state changed (generated ids, rows
        affected, generated insert id).

        Args:
            msg (mysqlx.protobuf.Message): A MySQL X Protobuf Message.
            result (Result): A `Result` based type object.
        """
        if msg["type"] == 1:
            warn_msg = Message.from_message("Mysqlx.Notice.Warning",
                                            msg["payload"])
            result.append_warning(warn_msg.level, warn_msg.code, warn_msg.msg)
        elif msg["type"] == 2:
            # Decoded only to validate the payload; the value is unused.
            Message.from_message("Mysqlx.Notice.SessionVariableChanged",
                                 msg["payload"])
        elif msg["type"] == 3:
            sess_state_msg = Message.from_message(
                "Mysqlx.Notice.SessionStateChanged", msg["payload"])
            if sess_state_msg["param"] == mysqlxpb_enum(
                    "Mysqlx.Notice.SessionStateChanged.Parameter."
                    "GENERATED_DOCUMENT_IDS"):
                # Each value is an octets scalar holding one document id.
                result.set_generated_ids(
                    [get_item_or_attr(
                        get_item_or_attr(value, 'v_octets'), 'value').decode()
                     for value in sess_state_msg["value"]])
            else:  # Following results are unitary and not a list
                sess_state_value = sess_state_msg["value"][0] \
                    if isinstance(sess_state_msg["value"],
                                  tuple(PROTOBUF_REPEATED_TYPES)) \
                    else sess_state_msg["value"]
                if sess_state_msg["param"] == mysqlxpb_enum(
                        "Mysqlx.Notice.SessionStateChanged.Parameter."
                        "ROWS_AFFECTED"):
                    result.set_rows_affected(
                        get_item_or_attr(sess_state_value, "v_unsigned_int"))
                elif sess_state_msg["param"] == mysqlxpb_enum(
                        "Mysqlx.Notice.SessionStateChanged.Parameter."
                        "GENERATED_INSERT_ID"):
                    result.set_generated_insert_id(get_item_or_attr(
                        sess_state_value, "v_unsigned_int"))
def _read_message(self, result):
"""Read message.
Args:
result (Result): A `Result` based type object.
"""
while True:
msg = self._reader.read_message()
if msg.type == "Mysqlx.Error":
raise OperationalError(msg["msg"], msg["code"])
elif msg.type == "Mysqlx.Notice.Frame":
try:
self._process_frame(msg, result)
except:
continue
elif msg.type == "Mysqlx.Sql.StmtExecuteOk":
return None
elif msg.type == "Mysqlx.Resultset.FetchDone":
result.set_closed(True)
elif msg.type == "Mysqlx.Resultset.FetchDoneMoreResultsets":
result.set_has_more_results(True)
elif msg.type == "Mysqlx.Resultset.Row":
result.set_has_data(True)
break
else:
break
return msg
def get_capabilites(self):
"""Get capabilities.
Returns:
mysqlx.protobuf.Message: MySQL X Protobuf Message.
"""
msg = Message("Mysqlx.Connection.CapabilitiesGet")
self._writer.write_message(
mysqlxpb_enum("Mysqlx.ClientMessages.Type.CON_CAPABILITIES_GET"),
msg)
msg = self._reader.read_message()
while msg.type == "Mysqlx.Notice.Frame":
msg = self._reader.read_message()
return msg
    def set_capabilities(self, **kwargs):
        """Send a CapabilitiesSet built from the given name/value pairs.

        Dict values become protobuf Object messages (one field per key);
        everything else goes through _create_any().

        Args:
            **kwargs: Arbitrary keyword arguments.

        Returns:
            mysqlx.protobuf.Message: MySQL X Protobuf Message, or None when
            the server rejected only the "session_connect_attrs"
            capability (error 5002).
        """
        capabilities = Message("Mysqlx.Connection.Capabilities")
        for key, value in kwargs.items():
            capability = Message("Mysqlx.Connection.Capability")
            capability["name"] = key
            if isinstance(value, dict):
                items = value
                obj_flds = []
                for item in items:
                    obj_fld = Message("Mysqlx.Datatypes.Object.ObjectField",
                                      key=item,
                                      value=self._create_any(items[item]))
                    obj_flds.append(obj_fld.get_message())
                msg_obj = Message("Mysqlx.Datatypes.Object", fld=obj_flds)
                msg_any = Message("Mysqlx.Datatypes.Any", type=2, obj=msg_obj)
                capability["value"] = msg_any.get_message()
            else:
                capability["value"] = self._create_any(value)
            capabilities["capabilities"].extend([capability.get_message()])
        msg = Message("Mysqlx.Connection.CapabilitiesSet")
        msg["capabilities"] = capabilities
        self._writer.write_message(
            mysqlxpb_enum("Mysqlx.ClientMessages.Type.CON_CAPABILITIES_SET"),
            msg)
        try:
            return self.read_ok()
        except InterfaceError as err:
            # Skip capability "session_connect_attrs" error since
            # is only available on version >= 8.0.16
            if err.errno != 5002:
                raise
            return None
    def set_session_capabilities(self, **kwargs):
        """Send session connection attributes via CapabilitiesSet.

        Args:
            **kwargs: Arbitrary keyword arguments.

        Returns:
            mysqlx.protobuf.Message: MySQL X Protobuf Message.
        """
        capabilities = Message("Mysqlx.Connection.Capabilities")
        for key, value in kwargs.items():
            capability = Message("Mysqlx.Connection.Capability")
            capability["name"] = key
            capability["value"] = self._create_any(value)
            capabilities["capabilities"].extend([capability.get_message()])
        msg = Message("Mysqlx.Connection.CapabilitiesSet")
        # NOTE(review): set_capabilities() assigns to msg["capabilities"];
        # "session_connect_attrs" here looks inconsistent -- confirm the
        # field name against the Mysqlx.Connection.CapabilitiesSet schema.
        msg["session_connect_attrs"] = capabilities
        self._writer.write_message(
            mysqlxpb_enum("Mysqlx.ClientMessages.Type.CON_CAPABILITIES_SET"),
            msg)
        return self.read_ok()
def send_auth_start(self, method, auth_data=None, initial_response=None):
"""Send authenticate start.
Args:
method (str): Message method.
auth_data (Optional[str]): Authentication data.
initial_response (Optional[str]): Initial response.
"""
msg = Message("Mysqlx.Session.AuthenticateStart")
msg["mech_name"] = method
if auth_data is not None:
msg["auth_data"] = auth_data
if initial_response is not None:
msg["initial_response"] = initial_response
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ClientMessages.Type.SESS_AUTHENTICATE_START"), msg)
def read_auth_continue(self):
"""Read authenticate continue.
Raises:
:class:`InterfaceError`: If the message type is not
`Mysqlx.Session.AuthenticateContinue`
Returns:
str: The authentication data.
"""
msg = self._reader.read_message()
while msg.type == "Mysqlx.Notice.Frame":
msg = self._reader.read_message()
if msg.type != "Mysqlx.Session.AuthenticateContinue":
raise InterfaceError("Unexpected message encountered during "
"authentication handshake")
return msg["auth_data"]
def send_auth_continue(self, auth_data):
"""Send authenticate continue.
Args:
auth_data (str): Authentication data.
"""
msg = Message("Mysqlx.Session.AuthenticateContinue",
auth_data=auth_data)
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ClientMessages.Type.SESS_AUTHENTICATE_CONTINUE"), msg)
def read_auth_ok(self):
"""Read authenticate OK.
Raises:
:class:`mysqlx.InterfaceError`: If message type is `Mysqlx.Error`.
"""
while True:
msg = self._reader.read_message()
if msg.type == "Mysqlx.Session.AuthenticateOk":
break
if msg.type == "Mysqlx.Error":
raise InterfaceError(msg.msg)
    def send_prepare_prepare(self, msg_type, msg, stmt):
        """
        Send prepare statement.

        When the statement has a LIMIT (and is not an Insert), the CRUD
        message is rebuilt without it and a placeholder-based 'limit_expr'
        is added instead, so the limit values can be bound at execute time.

        Args:
            msg_type (str): Message ID string.
            msg (mysqlx.protobuf.Message): MySQL X Protobuf Message.
            stmt (Statement): A `Statement` based type object.

        Raises:
            :class:`mysqlx.NotSupportedError`: If prepared statements are not
                                               supported.

        .. versionadded:: 8.0.16
        """
        if stmt.has_limit and msg.type != "Mysqlx.Crud.Insert":
            # Remove 'limit' from message by building a new one
            if msg.type == "Mysqlx.Crud.Find":
                _, msg = self.build_find(stmt)
            elif msg.type == "Mysqlx.Crud.Update":
                _, msg = self.build_update(stmt)
            elif msg.type == "Mysqlx.Crud.Delete":
                _, msg = self.build_delete(stmt)
            else:
                raise ValueError("Invalid message type: {}".format(msg_type))
            # Build 'limit_expr' message; placeholders follow the existing
            # bindings, hence the position offset.
            position = len(stmt.get_bindings())
            placeholder = mysqlxpb_enum("Mysqlx.Expr.Expr.Type.PLACEHOLDER")
            msg_limit_expr = Message("Mysqlx.Crud.LimitExpr")
            msg_limit_expr["row_count"] = Message("Mysqlx.Expr.Expr",
                                                  type=placeholder,
                                                  position=position)
            if msg.type == "Mysqlx.Crud.Find":
                # Only Find supports an offset placeholder.
                msg_limit_expr["offset"] = Message("Mysqlx.Expr.Expr",
                                                   type=placeholder,
                                                   position=position + 1)
            msg["limit_expr"] = msg_limit_expr
        oneof_type, oneof_op = CRUD_PREPARE_MAPPING[msg_type]
        msg_oneof = Message("Mysqlx.Prepare.Prepare.OneOfMessage")
        msg_oneof["type"] = mysqlxpb_enum(oneof_type)
        msg_oneof[oneof_op] = msg
        msg_prepare = Message("Mysqlx.Prepare.Prepare")
        msg_prepare["stmt_id"] = stmt.stmt_id
        msg_prepare["stmt"] = msg_oneof
        self._writer.write_message(
            mysqlxpb_enum("Mysqlx.ClientMessages.Type.PREPARE_PREPARE"),
            msg_prepare)
        try:
            self.read_ok()
        except InterfaceError:
            # Older servers reject Prepare entirely.
            raise NotSupportedError
    def send_prepare_execute(self, msg_type, msg, stmt):
        """
        Send execute statement.

        Binding values are sent as Any messages; when the statement has a
        LIMIT, its row count and offset are appended as the trailing args
        (matching the placeholders added by send_prepare_prepare).

        Args:
            msg_type (str): Message ID string.
            msg (mysqlx.protobuf.Message): MySQL X Protobuf Message.
            stmt (Statement): A `Statement` based type object.

        .. versionadded:: 8.0.16
        """
        # NOTE(review): msg_oneof is built but never attached to
        # msg_execute -- apparently dead code; confirm before removing.
        oneof_type, oneof_op = CRUD_PREPARE_MAPPING[msg_type]
        msg_oneof = Message("Mysqlx.Prepare.Prepare.OneOfMessage")
        msg_oneof["type"] = mysqlxpb_enum(oneof_type)
        msg_oneof[oneof_op] = msg
        msg_execute = Message("Mysqlx.Prepare.Execute")
        msg_execute["stmt_id"] = stmt.stmt_id
        args = self._get_binding_args(stmt, is_scalar=False)
        if args:
            msg_execute["args"].extend(args)
        if stmt.has_limit:
            msg_execute["args"].extend([
                self._create_any(stmt.get_limit_row_count()).get_message(),
                self._create_any(stmt.get_limit_offset()).get_message()
            ])
        self._writer.write_message(
            mysqlxpb_enum("Mysqlx.ClientMessages.Type.PREPARE_EXECUTE"),
            msg_execute)
def send_prepare_deallocate(self, stmt_id):
"""
Send prepare deallocate statement.
Args:
stmt_id (int): Statement ID.
.. versionadded:: 8.0.16
"""
msg_dealloc = Message("Mysqlx.Prepare.Deallocate")
msg_dealloc["stmt_id"] = stmt_id
self._writer.write_message(
mysqlxpb_enum("Mysqlx.ClientMessages.Type.PREPARE_DEALLOCATE"),
msg_dealloc)
self.read_ok()
def send_msg_without_ps(self, msg_type, msg, stmt):
"""
Send a message without prepared statements support.
Args:
msg_type (str): Message ID string.
msg (mysqlx.protobuf.Message): MySQL X Protobuf Message.
stmt (Statement): A `Statement` based type object.
.. versionadded:: 8.0.16
"""
if stmt.has_limit:
msg_limit = Message("Mysqlx.Crud.Limit")
msg_limit["row_count"] = stmt.get_limit_row_count()
if msg.type == "Mysqlx.Crud.Find":
msg_limit["offset"] = stmt.get_limit_offset()
msg["limit"] = msg_limit
is_scalar = False \
if msg_type == "Mysqlx.ClientMessages.Type.SQL_STMT_EXECUTE" \
else True
args = self._get_binding_args(stmt, is_scalar=is_scalar)
if args:
msg["args"].extend(args)
self.send_msg(msg_type, msg)
def send_msg(self, msg_type, msg):
"""
Send a message.
Args:
msg_type (str): Message ID string.
msg (mysqlx.protobuf.Message): MySQL X Protobuf Message.
.. versionadded:: 8.0.16
"""
self._writer.write_message(mysqlxpb_enum(msg_type), msg)
    def build_find(self, stmt):
        """Build find/read message.

        Projection, filter clauses, row locking and lock contention are
        copied from the statement when present.

        Args:
            stmt (Statement): A :class:`mysqlx.ReadStatement` or
                              :class:`mysqlx.FindStatement` object.

        Returns:
            (tuple): Tuple containing:

                * `str`: Message ID string.
                * :class:`mysqlx.protobuf.Message`: MySQL X Protobuf Message.

        .. versionadded:: 8.0.16
        """
        data_model = mysqlxpb_enum("Mysqlx.Crud.DataModel.DOCUMENT"
                                   if stmt.is_doc_based() else
                                   "Mysqlx.Crud.DataModel.TABLE")
        collection = Message("Mysqlx.Crud.Collection",
                             name=stmt.target.name,
                             schema=stmt.schema.name)
        msg = Message("Mysqlx.Crud.Find", data_model=data_model,
                      collection=collection)
        if stmt.has_projection:
            msg["projection"] = stmt.get_projection_expr()
        self._apply_filter(msg, stmt)
        # Exclusive wins over shared when both flags are set.
        if stmt.is_lock_exclusive():
            msg["locking"] = \
                mysqlxpb_enum("Mysqlx.Crud.Find.RowLock.EXCLUSIVE_LOCK")
        elif stmt.is_lock_shared():
            msg["locking"] = \
                mysqlxpb_enum("Mysqlx.Crud.Find.RowLock.SHARED_LOCK")
        if stmt.lock_contention > 0:
            msg["locking_options"] = stmt.lock_contention
        return "Mysqlx.ClientMessages.Type.CRUD_FIND", msg
    def build_update(self, stmt):
        """Build update message.

        One Mysqlx.Crud.UpdateOperation is appended per update op on the
        statement; ops without a value (e.g. unset) omit the 'value' field.

        Args:
            stmt (Statement): A :class:`mysqlx.ModifyStatement` or
                              :class:`mysqlx.UpdateStatement` object.

        Returns:
            (tuple): Tuple containing:

                * `str`: Message ID string.
                * :class:`mysqlx.protobuf.Message`: MySQL X Protobuf Message.

        .. versionadded:: 8.0.16
        """
        data_model = mysqlxpb_enum("Mysqlx.Crud.DataModel.DOCUMENT"
                                   if stmt.is_doc_based() else
                                   "Mysqlx.Crud.DataModel.TABLE")
        collection = Message("Mysqlx.Crud.Collection",
                             name=stmt.target.name,
                             schema=stmt.schema.name)
        msg = Message("Mysqlx.Crud.Update", data_model=data_model,
                      collection=collection)
        self._apply_filter(msg, stmt)
        for _, update_op in stmt.get_update_ops().items():
            operation = Message("Mysqlx.Crud.UpdateOperation")
            operation["operation"] = update_op.update_type
            operation["source"] = update_op.source
            if update_op.value is not None:
                operation["value"] = build_expr(update_op.value)
            msg["operation"].extend([operation.get_message()])
        return "Mysqlx.ClientMessages.Type.CRUD_UPDATE", msg
def build_delete(self, stmt):
"""Build delete message.
Args:
stmt (Statement): A :class:`mysqlx.DeleteStatement` or
:class:`mysqlx.RemoveStatement` object.
Returns:
(tuple): Tuple containing:
* `str`: Message ID string.
* :class:`mysqlx.protobuf.Message`: MySQL X Protobuf Message.
.. versionadded:: 8.0.16
"""
data_model = mysqlxpb_enum("Mysqlx.Crud.DataModel.DOCUMENT"
if stmt.is_doc_based() else
"Mysqlx.Crud.DataModel.TABLE")
collection = Message("Mysqlx.Crud.Collection", name=stmt.target.name,
schema=stmt.schema.name)
msg = Message("Mysqlx.Crud.Delete", data_model=data_model,
collection=collection)
self._apply_filter(msg, stmt)
return "Mysqlx.ClientMessages.Type.CRUD_DELETE", msg
def build_execute_statement(self, namespace, stmt, args):
"""Build execute statement.
Args:
namespace (str): The namespace.
stmt (Statement): A `Statement` based type object.
args (iterable): An iterable object.
Returns:
(tuple): Tuple containing:
* `str`: Message ID string.
* :class:`mysqlx.protobuf.Message`: MySQL X Protobuf Message.
.. versionadded:: 8.0.16
"""
msg = Message("Mysqlx.Sql.StmtExecute", namespace=namespace, stmt=stmt,
compact_metadata=False)
if namespace == "mysqlx":
# mysqlx namespace behavior: one object with a list of arguments
items = args[0].items() if isinstance(args, (list, tuple)) else \
args.items()
obj_flds = []
for key, value in items:
obj_fld = Message("Mysqlx.Datatypes.Object.ObjectField",
key=key, value=self._create_any(value))
obj_flds.append(obj_fld.get_message())
msg_obj = Message("Mysqlx.Datatypes.Object", fld=obj_flds)
msg_any = Message("Mysqlx.Datatypes.Any", type=2, obj=msg_obj)
msg["args"] = [msg_any.get_message()]
else:
# xplugin namespace behavior: list of arguments
for arg in args:
value = self._create_any(arg)
msg["args"].extend([value.get_message()])
return "Mysqlx.ClientMessages.Type.SQL_STMT_EXECUTE", msg
    def build_insert(self, stmt):
        """Build insert statement.
        Args:
            stmt (Statement): A :class:`mysqlx.AddStatement` or
                              :class:`mysqlx.InsertStatement` object.
        Returns:
            (tuple): Tuple containing:
                * `str`: Message ID string.
                * :class:`mysqlx.protobuf.Message`: MySQL X Protobuf Message.
        .. versionadded:: 8.0.16
        """
        # DOCUMENT vs TABLE data model mirrors the statement flavor.
        data_model = mysqlxpb_enum("Mysqlx.Crud.DataModel.DOCUMENT"
                                   if stmt.is_doc_based() else
                                   "Mysqlx.Crud.DataModel.TABLE")
        collection = Message("Mysqlx.Crud.Collection",
                             name=stmt.target.name,
                             schema=stmt.schema.name)
        msg = Message("Mysqlx.Crud.Insert", data_model=data_model,
                      collection=collection)
        # Table inserts carry an explicit column list (_fields); each column
        # reference is parsed into one projection entry.
        if hasattr(stmt, "_fields"):
            for field in stmt._fields:
                expr = ExprParser(field, not stmt.is_doc_based()) \
                    .parse_table_insert_field()
                msg["projection"].extend([expr.get_message()])
        # Every value becomes one TypedRow: a list is a multi-column table
        # row, anything else (e.g. a document) is a single-field row.
        for value in stmt.get_values():
            row = Message("Mysqlx.Crud.Insert.TypedRow")
            if isinstance(value, list):
                for val in value:
                    row["field"].extend([build_expr(val).get_message()])
            else:
                row["field"].extend([build_expr(value).get_message()])
            msg["row"].extend([row.get_message()])
        # AddStatement exposes upsert semantics; propagate the flag when present.
        if hasattr(stmt, "is_upsert"):
            msg["upsert"] = stmt.is_upsert()
        return "Mysqlx.ClientMessages.Type.CRUD_INSERT", msg
    def close_result(self, result):
        """Close the result.
        Expects the reader to have no pending message left for this result.
        Args:
            result (Result): A `Result` based type object.
        Raises:
            :class:`mysqlx.OperationalError`: If a message is still pending
                (i.e. the read returns something other than ``None``).
        """
        msg = self._read_message(result)
        if msg is not None:
            raise OperationalError("Expected to close the result")
def read_row(self, result):
"""Read row.
Args:
result (Result): A `Result` based type object.
"""
msg = self._read_message(result)
if msg is None:
return None
if msg.type == "Mysqlx.Resultset.Row":
return msg
self._reader.push_message(msg)
return None
    def get_column_metadata(self, result):
        """Returns column metadata.
        Reads ColumnMetaData messages until the stream is exhausted or the
        first Row message appears (which is pushed back for ``read_row``).
        Args:
            result (Result): A `Result` based type object.
        Raises:
            :class:`mysqlx.InterfaceError`: If unexpected message.
        """
        columns = []
        while True:
            msg = self._read_message(result)
            if msg is None:
                break
            if msg.type == "Mysqlx.Resultset.Row":
                # First data row marks the end of the metadata block; return
                # it to the reader so row consumers can pick it up.
                self._reader.push_message(msg)
                break
            if msg.type != "Mysqlx.Resultset.ColumnMetaData":
                raise InterfaceError("Unexpected msg type")
            # NOTE(review): defaults for absent fields (length=21, flags=16)
            # look like server-side defaults — confirm against the protocol.
            col = Column(msg["type"], msg["catalog"], msg["schema"],
                         msg["table"], msg["original_table"],
                         msg["name"], msg["original_name"],
                         msg.get("length", 21),
                         msg.get("collation", 0),
                         msg.get("fractional_digits", 0),
                         msg.get("flags", 16),
                         msg.get("content_type"))
            columns.append(col)
        return columns
def read_ok(self):
"""Read OK.
Raises:
:class:`mysqlx.InterfaceError`: If unexpected message.
"""
msg = self._reader.read_message()
if msg.type == "Mysqlx.Error":
raise InterfaceError("Mysqlx.Error: {}".format(msg["msg"]),
errno=msg["code"])
if msg.type != "Mysqlx.Ok":
raise InterfaceError("Unexpected message encountered: {}"
"".format(msg["msg"]))
def send_connection_close(self):
"""Send connection close."""
msg = Message("Mysqlx.Connection.Close")
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ClientMessages.Type.CON_CLOSE"), msg)
def send_close(self):
"""Send close."""
msg = Message("Mysqlx.Session.Close")
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ClientMessages.Type.SESS_CLOSE"), msg)
def send_expect_open(self):
"""Send expectation."""
cond_key = mysqlxpb_enum(
"Mysqlx.Expect.Open.Condition.Key.EXPECT_FIELD_EXIST")
msg_oc = Message("Mysqlx.Expect.Open.Condition")
msg_oc["condition_key"] = cond_key
msg_oc["condition_value"] = "6.1"
msg_eo = Message("Mysqlx.Expect.Open")
msg_eo['cond'] = [msg_oc.get_message()]
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ClientMessages.Type.EXPECT_OPEN"), msg_eo)
def send_reset(self, keep_open=None):
"""Send reset session message.
Returns:
boolean: ``True`` if the server will keep the session open,
otherwise ``False``.
"""
msg = Message("Mysqlx.Session.Reset")
if keep_open is None:
try:
# Send expectation: keep connection open
self.send_expect_open()
self.read_ok()
keep_open = True
except InterfaceError:
# Expectation is unkown by this version of the server
keep_open = False
if keep_open:
msg["keep_open"] = True
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ClientMessages.Type.SESS_RESET"), msg)
self.read_ok()
if keep_open:
return True
return False
| 38.488162 | 80 | 0.564486 |
e5022b5020c48f943d3e6a85dc7a28b58a4fcd96 | 784 | py | Python | docker_engine/komand_docker_engine/actions/container_logs/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2020-03-18T09:14:55.000Z | 2020-03-18T09:14:55.000Z | docker_engine/komand_docker_engine/actions/container_logs/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | docker_engine/komand_docker_engine/actions/container_logs/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | import komand
from .schema import ContainerLogsInput, ContainerLogsOutput
# Custom imports below
import docker
class ContainerLogs(komand.Action):
    """Action that retrieves the logs of a Docker container by its ID."""

    def __init__(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this class is ever subclassed.
        super(ContainerLogs, self).__init__(
            name='container_logs',
            description='Retrieve container logs',
            input=ContainerLogsInput(),
            output=ContainerLogsOutput())

    def run(self, params={}):
        """Fetch the container named by ``params['id']`` and return its logs.

        Docker client errors propagate unchanged to the framework (the
        previous ``except ...: raise`` handler was a no-op and was removed).
        """
        container = self.connection.docker_client.containers.get(params.get('id'))
        return {'logs': str(container.logs())}

    def test(self):
        """TODO: Test action"""
        return {}
| 29.037037 | 86 | 0.604592 |
07205651b071eb6a9819ed201ff07b3ab6dca584 | 435 | py | Python | TAKfreeServer/runModelTest.py | benking84/FreeTakServer | d8f3c89e1b41d6f0ca61c0a08768d97d3e4a3f42 | [
"MIT"
] | null | null | null | TAKfreeServer/runModelTest.py | benking84/FreeTakServer | d8f3c89e1b41d6f0ca61c0a08768d97d3e4a3f42 | [
"MIT"
] | null | null | null | TAKfreeServer/runModelTest.py | benking84/FreeTakServer | d8f3c89e1b41d6f0ca61c0a08768d97d3e4a3f42 | [
"MIT"
] | null | null | null | from Controllers.RequestCOTController import RequestCOTController
import xml.etree.ElementTree as et
# Below, enter the comm type and the necessary parameters as arguments.
# Example (chat to all):
#aEvent = RequestCOTController().chat(chatType = 'chatToAll', senderCallsign = 'a', chatroom = 'b', groupOwner = 'c', id = 'd', parent = 'e', uid0 = 'f', uid1 = 'g', chatgrpid = 'd')
# Smoke test: build a ping CoT event with fixed lat/lon/hae values.
aEvent = RequestCOTController().ping(lat = 123, lon = 456, hae = 789)
print('over')
| 48.333333 | 182 | 0.717241 |
018ecc35ceb3840999b3bd671d865fc89a2158b4 | 7,435 | py | Python | lib/rucio/tests/test_rse_lfn2path.py | fno2010/rucio | 47e93cfbe5887071c70de4ba815c1bbdddfac2ce | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_rse_lfn2path.py | fno2010/rucio | 47e93cfbe5887071c70de4ba815c1bbdddfac2ce | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_rse_lfn2path.py | fno2010/rucio | 47e93cfbe5887071c70de4ba815c1bbdddfac2ce | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright CERN since 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from rucio.common import config
from rucio.rse.protocols.protocol import RSEDeterministicTranslation
try:
# PY2
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
# PY3
from configparser import NoOptionError, NoSectionError
@pytest.mark.noparallel(reason='uses pre-defined RSE, changes global configuration value')
class TestDeterministicTranslation(unittest.TestCase):
    """
    Verify the deterministic translator.
    """
    def setUp(self):
        """LFN2PFN: Creating RSEDeterministicTranslation instance"""
        self.rse = 'Mock'
        self.rse_attributes = {"rse": "Mock"}
        self.protocol_attributes = {"protocol": "test"}
        self.create_translator()
    def create_translator(self):
        """Create a new RSEDeterministicTranslation for use with tests."""
        self.translator = RSEDeterministicTranslation(self.rse, self.rse_attributes, self.protocol_attributes)
    def test_hash(self):
        """LFN2PFN: Translate to path using a hash (Success)"""
        self.rse_attributes['lfn2pfn_algorithm'] = 'hash'
        self.create_translator()
        assert self.translator.path("foo", "bar") == "foo/4e/99/bar"
    def test_default_hash(self):
        """LFN2PFN: Translate to path using default algorithm (Success)"""
        assert self.translator.path("foo", "bar") == "foo/4e/99/bar"
    def test_identity(self):
        """LFN2PFN: Translate to path using identity (Success)"""
        self.rse_attributes['lfn2pfn_algorithm'] = 'identity'
        self.create_translator()
        assert self.translator.path("foo", "bar") == "foo/bar"
    def test_user_scope(self):
        """LFN2PFN: Test special user scope rules (Success)"""
        assert self.translator.path("user.foo", "bar") == "user/foo/13/7f/bar"
    def test_register_func(self):
        """LFN2PFN: Verify we can register a custom function (Success)"""
        def static_register_test1(scope, name, rse, rse_attrs, proto_attrs):
            """Test function for registering LFN2PATH functions."""
            del scope
            del name
            del rse
            del rse_attrs
            del proto_attrs
            return "static_register_value1"
        def static_register_test2(scope, name, rse, rse_attrs, proto_attrs):
            """Second test function for registering LFN2PATH functions."""
            del scope
            del name
            del rse
            del rse_attrs
            del proto_attrs
            return "static_register_value2"
        # Default registration uses the function's own name; the second one
        # is registered under an explicit custom name.
        RSEDeterministicTranslation.register(static_register_test1)
        RSEDeterministicTranslation.register(static_register_test2, name="static_register_custom_name")
        self.rse_attributes['lfn2pfn_algorithm'] = 'static_register_test1'
        self.create_translator()
        assert self.translator.path("foo", "bar") == "static_register_value1"
        self.rse_attributes['lfn2pfn_algorithm'] = 'static_register_custom_name'
        self.create_translator()
        assert self.translator.path("foo", "bar") == "static_register_value2"
    def test_attr_mapping(self):
        """LFN2PFN: Verify we can map using rse and attrs (Successs)"""
        def rse_algorithm(scope, name, rse, rse_attrs, proto_attrs):
            """Test LFN2PATH function for exercising the different RSE/proto attrs."""
            tier = rse_attrs.get("tier", "T1")
            scheme = proto_attrs.get("scheme", "http")
            return "%s://%s_%s/%s/%s" % (scheme, tier, rse, scope, name)
        RSEDeterministicTranslation.register(rse_algorithm)
        self.rse_attributes['lfn2pfn_algorithm'] = 'rse_algorithm'
        self.create_translator()
        assert self.translator.path("foo", "bar") == "http://T1_Mock/foo/bar"
        self.rse_attributes['tier'] = 'T2'
        self.create_translator()
        assert self.translator.path("foo", "bar") == "http://T2_Mock/foo/bar"
        self.protocol_attributes['scheme'] = 'https'
        self.create_translator()
        assert self.translator.path("foo", "bar") == "https://T2_Mock/foo/bar"
        self.protocol_attributes['scheme'] = 'srm'
        self.create_translator()
        assert self.translator.path("foo", "bar") == "srm://T2_Mock/foo/bar"
    def test_module_load(self):
        """LFN2PFN: Test ability to provide LFN2PFN functions via module (Success)"""
        if not config.config_has_section('policy'):
            config.config_add_section('policy')
        config.config_set('policy', 'lfn2pfn_module', 'rucio.tests.lfn2pfn_module_test')
        # Force the translator to re-read the module-level configuration.
        RSEDeterministicTranslation._module_init_()  # pylint: disable=protected-access
        self.rse_attributes['lfn2pfn_algorithm'] = 'lfn2pfn_module_algorithm'
        self.create_translator()
        assert self.translator.path("foo", "bar") == "lfn2pfn_module_algorithm_value"
    def test_config_default_override(self):
        """LFN2PFN: Test override of default LFN2PFN algorithm via config (Success)"""
        if not config.config_has_section('policy'):
            config.config_add_section('policy')
        # Remember the pre-test value so it can be restored afterwards.
        try:
            orig_value = config.config_get('policy', 'lfn2pfn_algorithm_default')
        except (NoOptionError, NoSectionError):
            orig_value = None
        def static_test(scope, name, rse, rse_attrs, proto_attrs):
            """Static test function for config override."""
            del scope
            del name
            del rse
            del rse_attrs
            del proto_attrs
            return "static_test_value"
        RSEDeterministicTranslation.register(static_test)
        try:
            config.config_set('policy', 'lfn2pfn_algorithm_default', 'static_test')
            RSEDeterministicTranslation._module_init_()  # pylint: disable=protected-access
            assert self.translator.path("foo", "bar") == "static_test_value"
        finally:
            # Restore the global configuration so other tests are unaffected.
            if orig_value is None:
                config.config_remove_option('policy', 'lfn2pfn_algorithm_default')
            else:
                config.config_set('policy', 'lfn2pfn_algorithm_default', orig_value)
            RSEDeterministicTranslation._module_init_()  # pylint: disable=protected-access
    def test_supports(self):  # pylint: disable=no-self-use
        """LFN2PFN: See if the static `supports` method works"""
        def static_test(scope, name, rse, rse_attrs, proto_attrs):
            """Static test function for testing supports."""
            del scope
            del name
            del rse
            del rse_attrs
            del proto_attrs
            return "static_test_value"
        assert not RSEDeterministicTranslation.supports("static_supports")
        RSEDeterministicTranslation.register(static_test, "static_supports")
        assert RSEDeterministicTranslation.supports("static_supports")
| 42.976879 | 110 | 0.663753 |
ef42f35007d3b37b716ca625ada1ca98e52afd44 | 5,802 | py | Python | scripts/scrape_jobs_data.py | covega/enviro_papers | 97d8898372c711918df411fe0f76b0be3552654c | [
"MIT"
] | null | null | null | scripts/scrape_jobs_data.py | covega/enviro_papers | 97d8898372c711918df411fe0f76b0be3552654c | [
"MIT"
] | 17 | 2019-08-21T02:09:59.000Z | 2021-08-23T20:14:46.000Z | scripts/scrape_jobs_data.py | covega/enviro_papers | 97d8898372c711918df411fe0f76b0be3552654c | [
"MIT"
] | 1 | 2019-11-19T22:25:32.000Z | 2019-11-19T22:25:32.000Z | #!/usr/bin/env python
# coding: utf-8
import sys
import os, os.path
import requests
import time
import pandas as pd
import asyncio
from aiohttp import ClientSession
from bs4 import BeautifulSoup
from multiprocessing.pool import ThreadPool
# Input: per-region metadata (URLs to scrape); output: cleaned combined CSV.
# Both are resolved relative to the current working directory.
METADATA_CSV = os.path.join(os.getcwd(), 'data/raw/jobs/jobs_metadata.csv')
OUTPUT_CSV = os.path.join(os.getcwd(), 'data/cleaned/jobs/all.csv')
# Statistic columns harvested from each region page.
JOB_STAT_KEYS = [
    'countSolarJobs',
    'countWindJobs',
    'countEnergyJobs',
    'totalJobs',
    'percentOfStateJobs',
    'residentialMWhInvested',
    'commercialMWhInvested',
    'utilityMWhInvested',
    'totalMWhInvested',
    'residentialDollarsInvested',
    'commercialDollarsInvested',
    'utilityDollarsInvested',
    'totalDollarsInvested',
    'investmentHomesEquivalent',
    'countResidentialInstallations',
    'countCommercialInstallations',
    'countUtilityInstallations',
    'countTotalInstallations',
    'residentialMWCapacity',
    'commercialMWCapacity',
    'utilityMWCapacity',
    'totalMWCapacity'
]
# Identifying columns come first, followed by every statistic column.
CSV_KEYS = [
    'stateAbbr',
    'geoType',
    'name',
    'geoid',
    'sourceURL'
]
CSV_KEYS.extend(JOB_STAT_KEYS)
# Maps the scraped page layout to CSV keys: 'tables' lists the per-table
# cell order, 'totals' the per-<li> summary order (consumed by scrape()).
HTML_STRUCTURE = {
    'tables': [
        ['countSolarJobs', 'countWindJobs', 'countEnergyJobs'],
        ['residentialDollarsInvested', 'residentialMWhInvested', 'commercialDollarsInvested', 'commercialMWhInvested', 'utilityDollarsInvested', 'utilityMWhInvested'],
        ['countResidentialInstallations', 'residentialMWCapacity', 'countCommercialInstallations', 'commercialMWCapacity', 'countUtilityInstallations', 'utilityMWCapacity'],
    ],
    'totals': [
        ['totalJobs', 'percentOfStateJobs'],
        ['totalDollarsInvested', 'totalMWhInvested', 'investmentHomesEquivalent'],
        ['countTotalInstallations', 'totalMWCapacity']
    ]
}
# (geoType code, human-readable label) pairs; not referenced in this chunk —
# presumably used by the metadata-generation step. TODO confirm.
REGION_TYPES = [
    ('state', 'State'),
    ('county', 'County'),
    ('sldu', 'State Senate District'),
    ('sldl', 'State House District'),
    ('cd', 'Congressional District')]
def scrape(metadata, attempt=1):
    """Scrape one region page and return its stats as a dict keyed by CSV_KEYS.

    Retries up to 3 times (recursively, 3 s apart) on HTTP >= 400; returns
    None after the third failed attempt. Raises ValueError when the page
    layout does not match HTML_STRUCTURE.
    """
    url = metadata['html_url']
    _idx = metadata['_idx']
    with requests.get(url) as response:
        # Identifying columns copied straight from the metadata row.
        row = {
            'stateAbbr': metadata['state_abbr'],
            'geoid': metadata['geoid'],
            'geoType': metadata['region_type'],
            'name': metadata['name'],
            'sourceURL': metadata['html_url'],
        }
        # Short path suffix used only for progress logging.
        unique_key = url.replace('http://assessor.keva.la/cleanenergyprogress', '')
        if attempt > 3:
            print("%d: [%d/3] – %s – FAIL – %s" % (_idx, attempt, response.status_code, unique_key))
            return None
        if response.status_code >= 400:
            print("%d: [%d/3] – %s – RETRY – %s" % (_idx, attempt, response.status_code, unique_key))
            time.sleep(3)
            return scrape(metadata, attempt + 1)
        html = response.text
        soup = BeautifulSoup(html, 'html5lib')
        # Prefer the on-page display name over the metadata name.
        row['name'] = soup.find('span', id='geography__name').text.strip()
        # Each analytics table maps positionally onto HTML_STRUCTURE['tables'].
        outer_divs = soup.find_all('div', class_='analytics_data')
        for keylist, outerdiv in zip(HTML_STRUCTURE['tables'], outer_divs):
            tds = outerdiv.find_all('td', class_='table_data')
            values = [elem.text.strip() for elem in tds[:len(keylist)]]
            for idx, key in enumerate(keylist):
                row[key] = values[idx]
        # The three class-less <li> elements carry the summary totals.
        li_buckets = soup.find_all('li', class_=None)
        if len(li_buckets) != 3:
            print("%d: [%d/3] – %s – PARSE – %s" % (_idx, attempt, response.status_code, unique_key))
            print("li_buckets:", li_buckets)
            print(html)
            raise ValueError
        for keylist, outerli in zip(HTML_STRUCTURE['totals'], li_buckets):
            total_spans = outerli.find_all('span', class_='analytics_total_num')
            totals = [elem.text.strip() for elem in total_spans]
            # State pages omit the percent-of-state figure, so drop that key.
            if metadata['region_type'] == 'state' and keylist[-1] == 'percentOfStateJobs':
                keylist = keylist [:-1]
            if len(totals) == 0:
                # No totals rendered at all: record zeros.
                for key in keylist:
                    row[key] = 0
            elif len(totals) != len(keylist):
                print("%d: [%d/3] – %s – PARSE – %s" % (_idx, attempt, response.status_code, unique_key))
                print("totals:", totals, keylist)
                print(html)
                raise ValueError
            else:
                for idx, key in enumerate(keylist):
                    row[key] = totals[idx]
        print("%d: [%d/3] – %s – OK – %s" % (_idx, attempt, response.status_code, unique_key))
        return row
def scrape_jobs_data():
    """Scrape every region listed in METADATA_CSV into OUTPUT_CSV.

    Resumable: rows whose sourceURL is already in OUTPUT_CSV are skipped.
    Work is dispatched in batches of 100 URLs across 20 threads, writing
    the accumulated frame to disk after each batch.
    """
    jobs_data = None
    if os.path.exists(OUTPUT_CSV):
        # Resume from a previous partial run.
        jobs_data = pd.read_csv(OUTPUT_CSV, encoding='ISO-8859-1')
    else:
        jobs_data = pd.DataFrame(columns=CSV_KEYS)
    jobs_metadata = [x for _, x in pd.read_csv(METADATA_CSV, encoding='ISO-8859-1').iterrows()]
    processed_urls = set(jobs_data['sourceURL'])
    batch = []
    batch_size = 100
    for i, metadata_row in enumerate(jobs_metadata):
        url = jobs_metadata[i]['html_url']
        if url in processed_urls:
            print ("Skipped: %d" % i, end='\r')
            # Fall through only on the final row so the last (possibly
            # partial) batch below still gets flushed.
            if i != len(jobs_metadata) - 1:
                continue
        if url not in processed_urls:
            # _idx is carried along purely for progress logging in scrape().
            metadata_row['_idx'] = i
            batch.append(metadata_row)
        if len(batch) >= batch_size or i == len(jobs_metadata) - 1:
            print("\nStarting Batch")
            results = ThreadPool(20).imap_unordered(scrape, batch)
            for data_row in results:
                # NOTE(review): scrape() returns None after 3 failed
                # attempts; appending None to the frame may misbehave —
                # verify failure handling.
                jobs_data = jobs_data.append(data_row, ignore_index=True)
            # Checkpoint after every batch so progress survives a crash.
            jobs_data.to_csv(OUTPUT_CSV)
            batch = []
            print("Wrote to disk.")
    jobs_data.to_csv(OUTPUT_CSV)
# Script entry point: run the full scrape when invoked directly.
if __name__ == '__main__':
    scrape_jobs_data()
| 33.344828 | 173 | 0.607377 |
776ad4202b1a8baa32c5a633837b06810175d85e | 3,030 | py | Python | venv/Lib/site-packages/sqlalchemy/inspection.py | YunJaePark3908/BaseAPIServer | 17ab922917541406a3c2d75b428614ce97152a16 | [
"Apache-2.0"
] | 2 | 2021-02-20T22:43:47.000Z | 2021-05-06T03:43:20.000Z | venv/Lib/site-packages/sqlalchemy/inspection.py | YunJaePark3908/BaseAPIServer | 17ab922917541406a3c2d75b428614ce97152a16 | [
"Apache-2.0"
] | 8 | 2021-03-26T19:13:07.000Z | 2021-04-19T18:34:33.000Z | venv/Lib/site-packages/sqlalchemy/inspection.py | YunJaePark3908/BaseAPIServer | 17ab922917541406a3c2d75b428614ce97152a16 | [
"Apache-2.0"
] | 3 | 2021-11-30T11:10:26.000Z | 2021-12-08T05:59:31.000Z | # sqlalchemy/inspect.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The inspection module provides the :func:`_sa.inspect` function,
which delivers runtime information about a wide variety
of SQLAlchemy objects, both within the Core as well as the
ORM.
The :func:`_sa.inspect` function is the entry point to SQLAlchemy's
public API for viewing the configuration and construction
of in-memory objects. Depending on the type of object
passed to :func:`_sa.inspect`, the return value will either be
a related object which provides a known interface, or in many
cases it will return the object itself.
The rationale for :func:`_sa.inspect` is twofold. One is that
it replaces the need to be aware of a large variety of "information
getting" functions in SQLAlchemy, such as
:meth:`_reflection.Inspector.from_engine`,
:func:`.orm.attributes.instance_state`, :func:`_orm.class_mapper`,
and others. The other is that the return value of :func:`_sa.inspect`
is guaranteed to obey a documented API, thus allowing third party
tools which build on top of SQLAlchemy configurations to be constructed
in a forwards-compatible way.
"""
from . import exc
from . import util
_registrars = util.defaultdict(list)
def inspect(subject, raiseerr=True):
    """Produce an inspection object for the given target.

    Walks the MRO of ``subject``'s type looking for a registered
    inspection provider.  A registration of ``True`` means the subject is
    its own inspection result and is returned as-is; otherwise the
    registered callable is invoked with the subject.

    :param subject: the subject to be inspected.
    :param raiseerr: When ``True``, if no inspection system is available
     for the given subject,
     :class:`sqlalchemy.exc.NoInspectionAvailable`
     is raised.  If ``False``, ``None`` is returned.

    """
    subject_type = type(subject)
    registrar = inspector = None
    for klass in subject_type.__mro__:
        if klass not in _registrars:
            continue
        registrar = _registrars[klass]
        if registrar is True:
            return subject
        inspector = registrar(subject)
        if inspector is not None:
            break
    if raiseerr and (registrar is None or inspector is None):
        raise exc.NoInspectionAvailable(
            "No inspection system is "
            "available for object of type %s" % subject_type
        )
    return inspector
def _inspects(*types):
    """Decorator factory: register the decorated callable as the
    inspection provider for each class in ``types``."""
    def decorate(fn_or_cls):
        for target in types:
            if target in _registrars:
                raise AssertionError(
                    "Type %s is already registered" % target)
            _registrars[target] = fn_or_cls
        return fn_or_cls
    return decorate
def _self_inspects(cls):
    """Class decorator marking *cls* as its own inspection result."""
    register = _inspects(cls)
    register(True)
    return cls
| 32.234043 | 72 | 0.684488 |
01a165d3a81f6c65f2375ff208ce3657c9a28f7b | 636 | py | Python | Day32/ex2.py | SSRout/100-days-of-code | 7aafa7789a57bf701b60043fa2bf8fb61b64bfb5 | [
"MIT"
] | null | null | null | Day32/ex2.py | SSRout/100-days-of-code | 7aafa7789a57bf701b60043fa2bf8fb61b64bfb5 | [
"MIT"
] | null | null | null | Day32/ex2.py | SSRout/100-days-of-code | 7aafa7789a57bf701b60043fa2bf8fb61b64bfb5 | [
"MIT"
] | null | null | null | #Monday Motivation Project
import smtplib
import datetime as dt
import random
# Credentials are placeholders — use a real address and an app password
# (never commit real secrets; load from the environment instead).
MY_EMAIL = "test mail"
MY_PASSWORD = "testkey"
now = dt.datetime.now()
weekday = now.weekday()
# weekday() == 0 is Monday: only send the motivational quote on Mondays.
if weekday == 0:
    with open("Day32/quotes.txt") as quote_file:
        all_quotes = quote_file.readlines()
        quote = random.choice(all_quotes)
    print(quote)
    with smtplib.SMTP("smtp.gmail.com") as connection:
        # Upgrade to TLS before authenticating.
        connection.starttls()
        connection.login(MY_EMAIL, MY_PASSWORD)
        connection.sendmail(
            from_addr=MY_EMAIL,
            to_addrs=MY_EMAIL,
            msg=f"Subject:Monday Motivation\n\n{quote}"
        )
| 25.44 | 55 | 0.658805 |
45b6ebf3e23f1bd09c9451ce002c60c80d93154a | 4,566 | py | Python | memsource_cli/models/quality_assurance_response_dto.py | unofficial-memsource/memsource-cli-client | a6639506b74e95476da87f4375953448b76ea90c | [
"Apache-2.0"
] | 16 | 2019-09-25T00:20:38.000Z | 2021-05-04T05:56:10.000Z | memsource_cli/models/quality_assurance_response_dto.py | zerodayz/memsource-cli-client | c2574f1467539a49e6637c874e88d75c7ef789b3 | [
"Apache-2.0"
] | 26 | 2019-09-30T14:00:03.000Z | 2021-05-12T11:15:18.000Z | memsource_cli/models/quality_assurance_response_dto.py | zerodayz/memsource-cli-client | c2574f1467539a49e6637c874e88d75c7ef789b3 | [
"Apache-2.0"
] | 1 | 2021-05-24T16:19:14.000Z | 2021-05-24T16:19:14.000Z | # coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.segment_warnings_dto import SegmentWarningsDto # noqa: F401,E501
class QualityAssuranceResponseDto(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type string (used by to_dict/serialization).
    swagger_types = {
        'segment_warnings': 'list[SegmentWarningsDto]',
        'finished': 'bool'
    }
    # Attribute name -> JSON key in the wire representation.
    attribute_map = {
        'segment_warnings': 'segmentWarnings',
        'finished': 'finished'
    }
    def __init__(self, segment_warnings=None, finished=None):  # noqa: E501
        """QualityAssuranceResponseDto - a model defined in Swagger"""  # noqa: E501
        self._segment_warnings = None
        self._finished = None
        self.discriminator = None
        if segment_warnings is not None:
            self.segment_warnings = segment_warnings
        if finished is not None:
            self.finished = finished
    @property
    def segment_warnings(self):
        """Gets the segment_warnings of this QualityAssuranceResponseDto.  # noqa: E501
        :return: The segment_warnings of this QualityAssuranceResponseDto.  # noqa: E501
        :rtype: list[SegmentWarningsDto]
        """
        return self._segment_warnings
    @segment_warnings.setter
    def segment_warnings(self, segment_warnings):
        """Sets the segment_warnings of this QualityAssuranceResponseDto.
        :param segment_warnings: The segment_warnings of this QualityAssuranceResponseDto.  # noqa: E501
        :type: list[SegmentWarningsDto]
        """
        self._segment_warnings = segment_warnings
    @property
    def finished(self):
        """Gets the finished of this QualityAssuranceResponseDto.  # noqa: E501
        :return: The finished of this QualityAssuranceResponseDto.  # noqa: E501
        :rtype: bool
        """
        return self._finished
    @finished.setter
    def finished(self, finished):
        """Sets the finished of this QualityAssuranceResponseDto.
        :param finished: The finished of this QualityAssuranceResponseDto.  # noqa: E501
        :type: bool
        """
        self._finished = finished
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models/lists/dicts of models to dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(QualityAssuranceResponseDto, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, QualityAssuranceResponseDto):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 31.708333 | 421 | 0.618484 |
335c9b7b72bc5d95d0af505d11f3a4b9b1e2e7c1 | 6,005 | py | Python | test/neo4j_graphdatabaseservice_match.py | fabsx00/py2neo | 80f6605499ee4cec4b338f15453e8f509a09468a | [
"Apache-2.0"
] | null | null | null | test/neo4j_graphdatabaseservice_match.py | fabsx00/py2neo | 80f6605499ee4cec4b338f15453e8f509a09468a | [
"Apache-2.0"
] | null | null | null | test/neo4j_graphdatabaseservice_match.py | fabsx00/py2neo | 80f6605499ee4cec4b338f15453e8f509a09468a | [
"Apache-2.0"
] | 1 | 2021-10-08T03:41:54.000Z | 2021-10-08T03:41:54.000Z | #/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from py2neo import neo4j
class MatchTestCase(unittest.TestCase):
    def setUp(self):
        """Build a fresh three-node fixture graph.

        Alice and Bob LOVE and KNOW each other; Bob and Carol KNOW each
        other. Every relationship exists in both directions (6 in total).
        """
        self.graph_db = neo4j.GraphDatabaseService()
        self.graph_db.clear()
        # create() returns entities in argument order; the integers in the
        # relationship tuples index into the node dicts above.
        stuff = self.graph_db.create(
            {"name": "Alice"},
            {"name": "Bob"},
            {"name": "Carol"},
            (0, "LOVES", 1),
            (1, "LOVES", 0),
            (0, "KNOWS", 1),
            (1, "KNOWS", 0),
            (1, "KNOWS", 2),
            (2, "KNOWS", 1),
        )
        self.alice, self.bob, self.carol = stuff[0:3]
    def test_can_match_all(self):
        """match() with no criteria returns every relationship (6)."""
        rels = self.graph_db.match()
        assert len(rels) == 6
    def test_will_return_empty_list_on_no_match(self):
        """A relationship type absent from the graph yields an empty list."""
        rels = self.graph_db.match(rel_type="HATES")
        assert len(rels) == 0
    def test_can_match_start_node(self):
        """Matching by start node returns Alice's two outgoing rels."""
        rels = self.graph_db.match(start_node=self.alice)
        assert len(rels) == 2
        assert "KNOWS" in [rel.type for rel in rels]
        assert "LOVES" in [rel.type for rel in rels]
        assert self.bob in [rel.end_node for rel in rels]
    def test_can_match_type_only(self):
        """Matching by type alone returns both LOVES rels (Alice<->Bob)."""
        rels = self.graph_db.match(rel_type="LOVES")
        assert len(rels) == 2
        assert self.alice in [rel.start_node for rel in rels]
        assert self.bob in [rel.start_node for rel in rels]
        assert self.alice in [rel.end_node for rel in rels]
        assert self.bob in [rel.end_node for rel in rels]
    def test_can_match_start_node_and_type(self):
        """Start node plus type narrows to the single Alice-KNOWS->Bob rel."""
        rels = self.graph_db.match(start_node=self.alice, rel_type="KNOWS")
        assert len(rels) == 1
        assert self.bob in [rel.end_node for rel in rels]
    def test_can_match_start_node_and_end_node(self):
        """Start and end node together return both Alice->Bob rel types."""
        rels = self.graph_db.match(start_node=self.alice, end_node=self.bob)
        assert len(rels) == 2
        assert "KNOWS" in [rel.type for rel in rels]
        assert "LOVES" in [rel.type for rel in rels]
def test_can_match_type_and_end_node(self):
rels = self.graph_db.match(rel_type="KNOWS", end_node=self.bob)
assert len(rels) == 2
assert self.alice in [rel.start_node for rel in rels]
assert self.carol in [rel.start_node for rel in rels]
def test_can_bidi_match_start_node(self):
rels = self.graph_db.match(start_node=self.bob, bidirectional=True)
assert len(rels) == 6
assert "KNOWS" in [rel.type for rel in rels]
assert "LOVES" in [rel.type for rel in rels]
assert self.alice in [rel.start_node for rel in rels]
assert self.bob in [rel.start_node for rel in rels]
assert self.carol in [rel.start_node for rel in rels]
assert self.alice in [rel.end_node for rel in rels]
assert self.bob in [rel.end_node for rel in rels]
assert self.carol in [rel.end_node for rel in rels]
def test_can_bidi_match_start_node_and_type(self):
rels = self.graph_db.match(start_node=self.bob, rel_type="KNOWS", bidirectional=True)
assert len(rels) == 4
assert self.alice in [rel.start_node for rel in rels]
assert self.bob in [rel.start_node for rel in rels]
assert self.carol in [rel.start_node for rel in rels]
assert self.alice in [rel.end_node for rel in rels]
assert self.bob in [rel.end_node for rel in rels]
assert self.carol in [rel.end_node for rel in rels]
def test_can_bidi_match_start_node_and_end_node(self):
rels = self.graph_db.match(start_node=self.alice, end_node=self.bob, bidirectional=True)
assert len(rels) == 4
assert "KNOWS" in [rel.type for rel in rels]
assert "LOVES" in [rel.type for rel in rels]
assert self.alice in [rel.start_node for rel in rels]
assert self.bob in [rel.start_node for rel in rels]
assert self.alice in [rel.end_node for rel in rels]
assert self.bob in [rel.end_node for rel in rels]
def test_can_bidi_match_type_and_end_node(self):
rels = self.graph_db.match(rel_type="KNOWS", end_node=self.bob, bidirectional=True)
assert len(rels) == 4
assert self.alice in [rel.start_node for rel in rels]
assert self.carol in [rel.start_node for rel in rels]
assert self.alice in [rel.start_node for rel in rels]
assert self.bob in [rel.start_node for rel in rels]
assert self.carol in [rel.start_node for rel in rels]
assert self.alice in [rel.end_node for rel in rels]
assert self.bob in [rel.end_node for rel in rels]
assert self.carol in [rel.end_node for rel in rels]
def test_can_match_with_limit(self):
rels = self.graph_db.match(limit=3)
assert len(rels) == 3
def test_can_match_one(self):
rel = self.graph_db.match_one()
assert isinstance(rel, neo4j.Relationship)
def test_can_match_none(self):
rels = self.graph_db.match(rel_type="HATES", limit=1)
assert len(rels) == 0
def test_can_match_multiple_types(self):
rels = self.graph_db.match(rel_type=("LOVES", "KNOWS"))
assert len(rels) == 6
def test_can_match_start_node_and_multiple_types(self):
rels = self.graph_db.match(start_node=self.alice, rel_type=("LOVES",
"KNOWS"))
assert len(rels) == 2
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 40.033333 | 96 | 0.64746 |
fa6d34758fbe55bf7d789ef7cff1409700b82689 | 14,048 | py | Python | reports/heliosV2/python/heliosFailureReport/heliosFailureReport.py | ped998/scripts | 0dcaaf47f9676210e1c972a5d59d8d0de82a1d93 | [
"Apache-2.0"
] | null | null | null | reports/heliosV2/python/heliosFailureReport/heliosFailureReport.py | ped998/scripts | 0dcaaf47f9676210e1c972a5d59d8d0de82a1d93 | [
"Apache-2.0"
] | null | null | null | reports/heliosV2/python/heliosFailureReport/heliosFailureReport.py | ped998/scripts | 0dcaaf47f9676210e1c972a5d59d8d0de82a1d93 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Helios v2 Failure Report"""
# import pyhesity wrapper module
from pyhesity import *
from datetime import datetime, timedelta
import codecs
# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com')  # Helios endpoint
parser.add_argument('-u', '--username', type=str, default='helios')  # API user name
parser.add_argument('-s', '--startdate', type=str, default='')  # explicit range start (YYYY-MM-DD)
parser.add_argument('-e', '--enddate', type=str, default='')  # explicit range end (YYYY-MM-DD)
parser.add_argument('-t', '--thismonth', action='store_true')  # report on the current calendar month
parser.add_argument('-l', '--lastmonth', action='store_true')  # report on the previous calendar month
parser.add_argument('-y', '--days', type=int, default=31)  # fallback: trailing N days
args = parser.parse_args()
# Copy parsed options into module-level names used throughout the script.
vip = args.vip
username = args.username
startdate = args.startdate
enddate = args.enddate
thismonth = args.thismonth
lastmonth = args.lastmonth
days = args.days
# Report identity: output file prefix, display title, and the Helios
# reporting-v2 component number for the failure report.
filePrefix = "heliosFailureReport"
title = "Helios Failure Report"
reportNumber = 900
# Column headings shared by the TSV, CSV and HTML outputs.
headings = '''Cluster Name
Group Name
Source
Object
Environment
Policy
Last Failure
Failed Backups
Strikes
Last Error'''.split('\n')
# authenticate
apiauth(vip=vip, username=username, domain='local', helios=True)
now = datetime.now()
dateString = dateToString(now, "%Y-%m-%d")
# Calendar-month boundaries used by --thismonth / --lastmonth.
thisCalendarMonth = now.replace(day=1, hour=0, minute=0, second=0)
endofLastMonth = thisCalendarMonth - timedelta(seconds=1)
lastCalendarMonth = endofLastMonth.replace(day=1, hour=0, minute=0, second=0)
# Resolve the reporting window (as microsecond epoch values): explicit
# start/end dates win, then --thismonth / --lastmonth, otherwise a
# trailing --days window.
if startdate != '' and enddate != '':
    uStart = dateToUsecs(startdate)
    uEnd = dateToUsecs(enddate)
elif thismonth:
    uStart = dateToUsecs(thisCalendarMonth)
    uEnd = dateToUsecs(now)
elif lastmonth:
    uStart = dateToUsecs(lastCalendarMonth)
    uEnd = dateToUsecs(endofLastMonth)
else:
    uStart = timeAgo(days, 'days')
    uEnd = dateToUsecs(now)
start = usecsToDate(uStart, '%Y-%m-%d')
end = usecsToDate(uEnd, '%Y-%m-%d')
# build 180 day time ranges
# Walk backwards from uEnd splitting the window into chunks no larger than
# 180 days (15552000000000 usecs); presumably the reporting API caps a
# single query at 180 days -- TODO confirm against the API docs.
ranges = []
gotAllRanges = False
thisUend = uEnd
thisUstart = uStart
while gotAllRanges is False:
    if (thisUend - uStart) > 15552000000000:
        thisUstart = thisUend - 15552000000000
        ranges.append({'start': thisUstart, 'end': thisUend})
        thisUend = thisUstart - 1
    else:
        ranges.append({'start': uStart, 'end': thisUend})
        gotAllRanges = True
# Heading rows rendered per output format.
tsvHeadings = '\t'.join(headings)
csvHeadings = ','.join(headings)
htmlHeadings = ''.join(['<th>%s</th>' % h for h in headings])
# TSV output
tsvFileName = "%s_%s_%s.txt" % (filePrefix, start, end)
tsv = codecs.open(tsvFileName, 'w', 'utf-8')
tsv.write('%s\n' % tsvHeadings)
# CSV output
csvFileName = "%s_%s_%s.csv" % (filePrefix, start, end)
csv = codecs.open(csvFileName, 'w', 'utf-8')  # NOTE: this name shadows the stdlib csv module (not imported here)
csv.write('%s\n' % csvHeadings)
# HTML output
htmlFileName = "%s_%s_%s.html" % (filePrefix, start, end)
htmlFile = codecs.open(htmlFileName, 'w', 'utf-8')
html = '''<html>
<head>
<style>
p {
color: #555555;
font-family:Arial, Helvetica, sans-serif;
}
span {
color: #555555;
font-family:Arial, Helvetica, sans-serif;
}
table {
font-family: Arial, Helvetica, sans-serif;
color: #333333;
font-size: 0.75em;
border-collapse: collapse;
width: 100%;
}
tr {
border: 1px solid #F8F8F8;
background-color: #F8F8F8;
}
td {
width: 25ch;
max-width: 250px;
text-align: left;
padding: 10px;
word-wrap:break-word;
white-space:normal;
}
td.nowrap {
width: 25ch;
max-width: 250px;
text-align: left;
padding: 10px;
padding-right: 15px;
word-wrap:break-word;
white-space:nowrap;
}
th {
width: 25ch;
max-width: 250px;
text-align: left;
padding: 6px;
white-space: nowrap;
}
</style>
</head>
<body>
<div style="margin:15px;">
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAALQAAAAaCAYAAAA
e23asAAAACXBIWXMAABcRAAAXEQHKJvM/AAABmWlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAA
AAPD94cGFja2V0IGJlZ2luPSfvu78nIGlkPSdXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQnP
z4KPHg6eG1wbWV0YSB4bWxuczp4PSdhZG9iZTpuczptZXRhLycgeDp4bXB0az0nSW1hZ2U6
OkV4aWZUb29sIDExLjcwJz4KPHJkZjpSREYgeG1sbnM6cmRmPSdodHRwOi8vd3d3LnczLm9
yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjJz4KCiA8cmRmOkRlc2NyaXB0aW9uIHJkZj
phYm91dD0nJwogIHhtbG5zOnBob3Rvc2hvcD0naHR0cDovL25zLmFkb2JlLmNvbS9waG90b
3Nob3AvMS4wLyc+CiAgPHBob3Rvc2hvcDpDb2xvck1vZGU+MzwvcGhvdG9zaG9wOkNvbG9y
TW9kZT4KIDwvcmRmOkRlc2NyaXB0aW9uPgo8L3JkZjpSREY+CjwveDp4bXBtZXRhPgo8P3h
wYWNrZXQgZW5kPSdyJz8+enmf4AAAAIB6VFh0UmF3IHByb2ZpbGUgdHlwZSBpcHRjAAB4nG
WNMQqAMAxF95zCI7RJ/GlnJzcHb6AtCILi/QfbDnXwBz7h8eDTvKzTcD9XPs5EQwsCSVDWq
LvTcj0S/ObYx/KOysbwSNDOEjsIMgJGE0RTi1TQVpAhdy3/tc8yqV5bq630An5xJATDlSTX
AAAMJ0lEQVR42uWce5RVVR3HP/fOmWEYHJlQTC1AxJBViqiIj9QyEs1Kl5qSj3ysMovV09I
SNTHNpJWZlWbmo8zUfIGImiQ+Mh0fmYDvIMWREOQ1As6MM2fu7Y/vb8/d58y9d865MyODfd
c6i7nn7LN/+/Hbv/chQwxBEPg/twE+CRwKTARGAFsAGaAVWAY0ArOBh4ANAGEYUg4xGlsC+
wCTgd2BDwN1QA5YCywGHgMeBF62+2VpBEGwPXC89QPwX+DmMAxbSIggCEYAXwQG260m4JYw
DNvseQAcBuyYtM8SaAVmh2G4Mv5g2vwp8b3YFdgZGAZkE/TdAdx25eR5S2JzG2RzG2W32mx
uTfa8ATjR6PQFQmBWGIYvBUEwGjgWGGTPOoF7gWd74htv/ADV1s8Y79HqoEhDgAZrfAqwGw
XG8LEFMM6uo4G/A5cDDwVB0FlqcB6NwYghTgP2NppxjAb2BL4AvAHcDVwLLAqCoBxT7wVcb
JMGHbyHgf+k2IR9gZ8CVfZ7KTq0r9vvocAFwPgUfRZDCKxEQqELHjOPQMx1JDDW1r0qYd+d
NuclsftbA+cCO9nvnK1vk/0eC1xkc+wL5IF3gZfQgTyfgqAAOAg4LgiCVUmZGgnZXwMf8O4
t7DrlHqPtAfwZuAJtal2CzrcEPgfcAvwAqItJ4TiN0cBvgRuAQyjOzJFX7Z1vAXOAbwCDi9
EwVBOVYAHJmcB/p1wfWaDG/u3NFVg/XTBmztjazEKHcy/EYGnm8RbwXJH7VUbXIRP7Xcl6l
UPGm+OjwO2x5wcB3wSqyuypBqbnI4EZRJl5BXB21msEcDBwM5KcxXpuB9YBa5CqjGMrdPrO
BWpLDG48cCNwMsUPSxuwHtiIpFcco4CfI+k4pASNfG93oAT6o9+s368nmY8Arkcaqtg4klx
PAq9WMKYcyUyaSvAOkv4vxtZgGvCpci/aXtcAZyHB6xAi6+B+nxv2QVJzTKyfTuBfyM55Cl
huE94GSfCjkVniUAOcAaw2Ip3eYHYwGvvFaLQgk+U+m+jb6EBtb20Pp6AeQfbXd4BmYGY5E
+c9xhPAsynfabH19bEbcCmwnXdvHfA35LOspufDlbexvFvBPBYDFxKVgD4agOOAeo/WA3Q3
bRxCe+7wb+DHwDXIhAIJwxnA80EQvFlmP49AwtDHPOAqIGe+DcORvRhn5pVIGt6A1FccDyA
p8l3gaxQk7iDgs8DvgQ1GoxY4j+7M/KJNbi46vXHcYYP9nk2k1u5X272n0UYPBNwFXNLLPg
bZvHxncyGSSg8jLdnfWNPDPHZAWrzeu3cdMjnLIgxDJ9xmAQcgyZyxx/siXpoeBEHoM7W9M
wb4EYVDALL9zweawzDsMiuOt86JNfw6cI8/GB9GZBlwNmL4CxBT3gb8BpkNDgcDU2M0FgKn
4km1EjQWI4m8HJhOwUMeBpwJPBkEwfoBIqV7i10Qszi4fWhM08mVk+f15xiLmSOZpC8bU7c
DP0MBgYleH6chbT3XtfcE4nRbH4d2YCbwTFdbpNZOIuoEtKCTcI8bQKmBGcF24FdIoi8F/k
HU/q1FjDvEu7cO+CHGzOVo2ITakLYYC5zgNTkA+DRwZ+/2qG/Qm0Nl9vPHkfp1uIEoM++E7
Mdq716IzJBX0QEIp82fUjFTpwy7VjR329cmZGbcSCEw0IAk7sIgCN7w+jwGmTk+ZgF/BPKu
XRbFfz8aa3gv8Jekg7Q2rcAfkFoMY++Oo7sGuBOzq3qiEYaha9OCbMsV3uNa4CiKO7GbI8Z
5f7dg5pQx58eQCXYT8CfvugmFNB9EJuBkIJg2f0o8lj0QcT/yq3LevT2R5q2xwzMOOIdoqM
/Z4Rt9/smi0FCt19AxZmvaE+f/7UlvkMPpS50W4FY1TS3RFtki+NgLOamlkAfCIAhIemHOb
Erk09DwaDlkiNqHLcAaT9Luh6JEVdbWXVm02TugmPXtKNTXV4mRfoHtfQj8EoXz/HU4Bfi8
zes8FL/21+UnRCMlgKTahNi9JSiaUekAi2E8URurCViUlplNTXUiSfQlCrbcdiict7zEq7U
o1jk2Bbk9UrR1GAlMStE+hxIf6+x3nqhjPBjYyjMfFqDEzqge+m0Avg9sC3x72vwp6/rZpq
4YtqdvITPjVgqCqR6Ff3dHGtjHTda2u8+FQmM+FgNr+9DBCoAPxe69TmETK1mAJeiUOmlWR
zTEFUcDUmtp4shZUjg6hi8jCZkUeeRIX+bd80Nfdcg/eMRMh38iW3ISUROrHqXFD0RMjI39
BLTWM6hM47yXeNTW4UJvbhNsXr5/txBpn7ZiPJqle3JjdR9PPiCqRkE1Gr0JP61HTqJDFeX
TtBnkRNWkuCqxyQehrGnSayhKQwNddnIjisO7cZ+MwlmgA/A0yuJe7l0XIeY9HNnS7uBmgd
OBiQPZljbGzKHwbNyc9Jl5PRIAr5USuJEsVT/B2Xk+cpV0lIDO+wELkGPtMAJt9BTKH7IQM
ftpKEHlMBwVIg3o9TEGbUbapKlIkzxKxMwt10+AYsW+97g1OhV9JaVDovFoUAaqmsql9JZE
HdkcxZMyDu0oVpm42g74INGYZxIsRXH5pMjRPbPYglTvvhTsyfGoJOFeVCD1JtH9WYNqNtp
R6PRi5Ig7p3B/pA2aU85nU+AZFJ++lEK+AaS5LgU6ylZaorjlcO/eR2whVvXRADtQ+aaPkW
iBU9PwMka+qdRGNJQXx1rgq5ROzRbDVBQCSyPZrkeLnhR5oqaTw6NItc6kYK4NQ/b5iYhxf
c26EWV6L6NwSBah0l+QDzOcAc7Q5h/lUeXhGUSzpXOA5T35dlmkpnzsCEzoqeopJZ4nugGj
gHEV0sig8JWfrVpFcTXlox0xT9Kro4KxtSNNkfRqIWZ+mR2dQ2UDZyCBE0cNkl7u2godwCH
2fiuS4n77wWwGMIbtpHthWiJtnkX2lq+uh6BUeHUahisVW7UBPkVUOtSjUEw2LQ1URnpw7N
ECSofsImNJelWKNDRK0TKm7EBMfRSqk1hGaTMwb2vgpH1AtM6ig8qKlDYVivldmST7EqC8+
VMoTutwJEorzumhkF6dFL4gOBSZF8+a6nDvvoAq0T7jvTYVZSOfSEEjQHUNvioKUVHQ5rRh
PeLKyfPwQnWnIzNrAjrQ9UQ3fBnKIHbYOyOJ2v8rUPTqfY8AxYOvQk6EU0tDkWG+BnjMSdE
ShUMgSX8qsh/Xo9z8NchmzSPVeh06NM6Z2w5VdJ0KvJaQxinIFvY3cxFRr/59A4+pQ+AVu3
pCFfAVxNQOjVQY99/c4LjlbpR58etMd0aFHzOQkb6xhHkwDDHZWciB2cL+3hWFi9Zbu/uQY
X+s9+4njMZ5qKCpswSN4SgcdSaKcDi0Ar8AVgyQSrtcpb5HfPwVxo3rbJ2mUfAxmlFZZ3+E
SgccAvMsWxHj7oSqvRzGAFejNPMsVIi+2hZna5SxmoqcNH8nm9FnXBuhy3t9BxWT7EK0GOo
AVG46B1X3LUYf2wYobDUJHYK9iQbZneN0x6ZeRA8TUeViUmSQ3fsA0oY+qpFpVVPm/TyFz5
vGom8vD6OgBfOoLqdxoKa++xoBdDHcUmSfXkG0Mm4wCupPQRmsDbZQ9RT/FnAdqowqJhVeQ
N+OXU30Y4LhKG18EjoMrYh564lKZIdOVFZ5ASVSoJsIx9iVBh32zl3uhknn/ZH2qqN88ivj
rVX8O8C5yKyrJGKzWSKuH59Dcc7pqPY0zkxDKZ9ifhnVUd8B5OIVeKaOXWHRJejg+PZwNdG
YeDGsQR8PXAa8XYKZB3RWLIYqogkEh8koS1gJ3kUO9znAyv8X6QweQ3sM14S+rp6NnLADUd
as1EeTORQym40Y7RXXXxwejUZkRpyMahDGUV61gpIjDyEN8gjlbcIc3Zk6bYo/z3tzMDJFx
jYESei02IDCd9eiEtJS2dM4vQzpbOzevp+k/ziNRP1HJLTHcO3AXxED7YoyTpOQTddgE9gA
vIbCcfNQtKHT9VMKHo2VKJJyMzJnDkEfh26LzJwcciiXAo8j9fk4lr7uwcxYBPyOwhcyrxu
9NHjOxuYiP0uIhr42INt/t5T9xvEOSjz5yNlck0YmWtHXKo1oP7rs8RLSeS2KOrkPj9tI93
HvKnRoRnv0F/RyHXw0I9vffewQov9sqEf8D1JlEi06AzkDAAAAAElFTkSuQmCC" style="width:180px">
<p style="margin-top: 15px; margin-bottom: 15px;">
<span style="font-size:1.3em;">'''
# Header bar: report title on the left, resolved date range on the right.
html += title
html += '''</span>
<span style="font-size:1em; text-align: right; padding-right: 2px; float: right;">'''
html += '%s to %s' % (start, end)
html += '''</span>
</p>
<table>
<tr style="background-color: #F1F1F1;">'''
html += htmlHeadings
html += '</tr>'
# Accumulator for per-object failure rows, keyed cluster:source:object:environment.
stats = {}
print('\nRetrieving report data...')
# Pull the failure report for each <=180-day chunk and merge the rows into
# 'stats'. The first chunk processed wins for descriptive fields (policy,
# last failure, last error); later chunks only add to the failure count.
for timeRange in ranges:  # renamed from 'range', which shadowed the builtin
    reportParams = {
        "filters": [
            {
                "attribute": "date",
                "filterType": "TimeRange",
                "timeRangeFilterParams": {
                    "lowerBound": timeRange['start'],
                    "upperBound": timeRange['end']
                }
            }
        ],
        "sort": None,
        "timezone": "America/New_York",
        "limit": {
            "size": 10000
        }
    }
    preview = api('post', 'components/%s/preview' % reportNumber, reportParams, reportingv2=True)
    clusters = list(set([c['system'] for c in preview['component']['data']]))
    for cluster in clusters:
        # Process each cluster's rows together.
        data = [d for d in preview['component']['data'] if d['system'] == cluster]
        for i in data:
            clusterName = i['system'].upper()
            jobName = i['groupName']
            sourceName = i['sourceName']
            objectName = i['objectName']
            environment = i['environment'][1:]  # drop leading char, e.g. 'kVMware' -> 'VMware'
            uniqueKey = "%s:%s:%s:%s" % (clusterName, sourceName, objectName, environment)
            policy = i['policyName']
            lastFailure = usecsToDate(i['lastFailedRunUsecs'], '%Y-%m-%d %H:%M')
            failedBackups = i['failedBackups']
            strikes = i['strikeCount']
            lastError = i['lastFailedRunErrorMsg']
            if len(lastError) > 301:
                lastError = lastError[0:300]  # cap very long error messages
            if uniqueKey not in stats:
                stats[uniqueKey] = {
                    'clusterName': clusterName,
                    'sourceName': sourceName,
                    'objectName': objectName,
                    'environment': environment,
                    'jobName': jobName,
                    'policy': policy,
                    'lastFailure': lastFailure,
                    'failedBackups': failedBackups,
                    'strikes': strikes,
                    'lastError': lastError
                }
            else:
                # Same object seen in an earlier chunk: accumulate failures only.
                stats[uniqueKey]['failedBackups'] += failedBackups
# Emit one TSV, CSV and HTML row per failing object, sorted by unique key.
for uniqueKey in sorted(stats.keys()):
    clusterName = stats[uniqueKey]['clusterName']
    sourceName = stats[uniqueKey]['sourceName']
    objectName = stats[uniqueKey]['objectName']
    environment = stats[uniqueKey]['environment']
    jobName = stats[uniqueKey]['jobName']
    policy = stats[uniqueKey]['policy']
    lastFailure = stats[uniqueKey]['lastFailure']
    failedBackups = stats[uniqueKey]['failedBackups']
    strikes = stats[uniqueKey]['strikes']
    # Flatten embedded newlines/tabs so the message stays on one delimited line.
    lastError = stats[uniqueKey]['lastError'].replace('\n', '/').replace('\t', '/')
    tsv.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (clusterName, jobName, sourceName, objectName, environment, policy, lastFailure, failedBackups, strikes, lastError))
    csv.write('"%s","%s","%s","%s","%s","%s","%s","%s","%s","%s"\n' % (clusterName, jobName, sourceName, objectName, environment, policy, lastFailure, failedBackups, strikes, lastError))
    html += '''<tr>
<td class="nowrap">%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td class="nowrap">%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
</tr>''' % (clusterName, jobName, sourceName, objectName, environment, policy, lastFailure, failedBackups, strikes, lastError)
# Close out the HTML document, flush all three outputs, and report file names.
html += '''</table>
</div>
</body>
</html>'''
htmlFile.write(html)
htmlFile.close()
tsv.close()
csv.close()
print('\nOutput saved to\n CSV: %s\n HTML: %s\n TSV: %s\n' % (csvFileName, htmlFileName, tsvFileName))
d5ed6760af723e49461b31cc89ec86dd56c7c723 | 2,195 | py | Python | misc/sampleapp-android-tests/sampleapp/webrtc_pack.py | jiajiax/crosswalk-test-suite | 7606ec383f9c240248cbea071b642691fbffb32f | [
"BSD-3-Clause"
] | null | null | null | misc/sampleapp-android-tests/sampleapp/webrtc_pack.py | jiajiax/crosswalk-test-suite | 7606ec383f9c240248cbea071b642691fbffb32f | [
"BSD-3-Clause"
] | null | null | null | misc/sampleapp-android-tests/sampleapp/webrtc_pack.py | jiajiax/crosswalk-test-suite | 7606ec383f9c240248cbea071b642691fbffb32f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Cici<cici.x.li@intel.com>
import unittest
import os
import sys
import commands
import comm
class TestSampleAppFunctions(unittest.TestCase):
    """Packaging test for the WebRTC Crosswalk sample application."""

    def test_pack(self):
        """Build the WebRTC sample APK via make_apk.py and verify the result."""
        comm.setUp()
        app_name = "Webrtc"
        # Manifest lives under the shared sample-source prefix.
        manifest_file = comm.sample_src_pref + "/WebRTC/" + "manifest.json"
        pack_cmd = "python %smake_apk.py --package=org.xwalk.%s --manifest=%s --arch=%s --mode=%s --enable-remote-debugging" % (
            comm.pack_tools,
            app_name.lower(),
            manifest_file,
            comm.ARCH,
            comm.MODE)
        comm.pack(pack_cmd, app_name, self)
# Run the packaging test when invoked directly.
if __name__ == '__main__':
    unittest.main()
| 39.909091 | 123 | 0.72574 |
1e27b26bb271b77d46eed0aeeff9b7db7a5746ed | 244 | py | Python | hbase_util_test.py | wangzishuo111/bk_zhangdan | 30be7d92c53de4f18d90c00aba1ee73073f47029 | [
"MIT"
] | null | null | null | hbase_util_test.py | wangzishuo111/bk_zhangdan | 30be7d92c53de4f18d90c00aba1ee73073f47029 | [
"MIT"
] | null | null | null | hbase_util_test.py | wangzishuo111/bk_zhangdan | 30be7d92c53de4f18d90c00aba1ee73073f47029 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import hbase_util
from nose.tools import nottest,istest
from nose.tools import assert_equal, assert_true, assert_false
def setUp():
    """Module-level nose setup hook; no per-test fixtures are required yet."""
    pass
# hbase_util.switch2local()
def test1():
    """Placeholder test so the suite runs; replace with real assertions."""
    pass
| 13.555556 | 62 | 0.696721 |
ab06a3a40826e7d41c040066fd41c56c1ed84ad2 | 26,666 | py | Python | tensorflow/python/layers/pooling.py | qinchangping/tensorflow | f7f7036d1cdc5716aff976fae0ea4d1b9a931b56 | [
"Apache-2.0"
] | 24 | 2018-02-01T15:49:22.000Z | 2021-01-11T16:31:18.000Z | tensorflow/python/layers/pooling.py | qinchangping/tensorflow | f7f7036d1cdc5716aff976fae0ea4d1b9a931b56 | [
"Apache-2.0"
] | 2 | 2017-08-01T21:11:06.000Z | 2017-08-01T23:07:02.000Z | tensorflow/python/layers/pooling.py | qinchangping/tensorflow | f7f7036d1cdc5716aff976fae0ea4d1b9a931b56 | [
"Apache-2.0"
] | 4 | 2018-10-29T18:43:22.000Z | 2020-09-28T07:19:52.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the pooling layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
class _Pooling1D(base.Layer):
  """Pooling layer for arbitrary pooling functions, for 1D inputs.

  This class only exists for code reuse. It will never be an exposed API.
  TensorFlow has no dedicated 1D pooling op, so the input is lifted to 4D,
  pooled with a 2D op over the width dimension, and squeezed back.

  Arguments:
    pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
    pool_size: An integer or tuple/list of a single integer,
      representing the size of the pooling window.
    strides: An integer or tuple/list of a single integer, specifying the
      strides of the pooling operation.
    padding: A string. The padding method, either 'valid' or 'same'.
      Case-insensitive.
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, length, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, length)`.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_function, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    super(_Pooling1D, self).__init__(name=name, **kwargs)
    self.pool_function = pool_function
    self.pool_size = utils.normalize_tuple(pool_size, 1, 'pool_size')
    self.strides = utils.normalize_tuple(strides, 1, 'strides')
    self.padding = utils.normalize_padding(padding)
    self.data_format = utils.normalize_data_format(data_format)
    self.input_spec = base.InputSpec(ndim=3)

  def call(self, inputs):
    # There is no TF op for 1D pooling, hence we make the inputs 4D.
    if self.data_format == 'channels_last':
      # input is NWC, make it NHWC
      inputs = array_ops.expand_dims(inputs, 1)
      # pool on the W dim
      pool_shape = (1, 1) + self.pool_size + (1,)
      strides = (1, 1) + self.strides + (1,)
      data_format = 'NHWC'
    else:
      # input is NCW, make it NCHW
      inputs = array_ops.expand_dims(inputs, 2)
      # pool on the W dim
      pool_shape = (1, 1, 1) + self.pool_size
      strides = (1, 1, 1) + self.strides
      data_format = 'NCHW'
    outputs = self.pool_function(
        inputs,
        ksize=pool_shape,
        strides=strides,
        padding=self.padding.upper(),
        data_format=data_format)
    # Drop the helper dimension that was inserted above.
    if self.data_format == 'channels_last':
      return array_ops.squeeze(outputs, 1)
    else:
      return array_ops.squeeze(outputs, 2)

  def compute_output_shape(self, input_shape):
    # Only the temporal dimension is resized. Previously this always read
    # axis 1, which is wrong for `channels_first` inputs (NCW), where the
    # temporal axis is 2; branch on data_format like the 2D variant does.
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_first':
      length = utils.conv_output_length(input_shape[2], self.pool_size[0],
                                        self.padding, self.strides[0])
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1], length])
    length = utils.conv_output_length(input_shape[1], self.pool_size[0],
                                      self.padding, self.strides[0])
    return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
class AveragePooling1D(_Pooling1D):
  """Average pooling over the temporal dimension of 1D inputs.

  Arguments:
    pool_size: An integer or tuple/list of a single integer giving the
      size of the pooling window.
    strides: An integer or tuple/list of a single integer giving the
      stride of the pooling operation.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: A string, 'channels_last' (default, inputs shaped
      `(batch, length, channels)`) or 'channels_first' (inputs shaped
      `(batch, channels, length)`).
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    # Delegate to the shared 1D pooling machinery with the average-pool op.
    super(AveragePooling1D, self).__init__(
        pool_function=nn.avg_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name,
        **kwargs)
def average_pooling1d(inputs, pool_size, strides,
                      padding='valid', data_format='channels_last',
                      name=None):
  """Functional interface for 1D average pooling.

  Arguments:
    inputs: The tensor over which to pool. Must have rank 3.
    pool_size: An integer or tuple/list of a single integer giving the
      size of the pooling window.
    strides: An integer or tuple/list of a single integer giving the
      stride of the pooling operation.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: A string, 'channels_last' (default, inputs shaped
      `(batch, length, channels)`) or 'channels_first' (inputs shaped
      `(batch, channels, length)`).
    name: A string, the name of the layer.

  Returns:
    The output tensor, of rank 3.

  Raises:
    ValueError: if eager execution is enabled.
  """
  return AveragePooling1D(pool_size=pool_size,
                          strides=strides,
                          padding=padding,
                          data_format=data_format,
                          name=name).apply(inputs)
class MaxPooling1D(_Pooling1D):
  """Max pooling over the temporal dimension of 1D inputs.

  Arguments:
    pool_size: An integer or tuple/list of a single integer giving the
      size of the pooling window.
    strides: An integer or tuple/list of a single integer giving the
      stride of the pooling operation.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: A string, 'channels_last' (default, inputs shaped
      `(batch, length, channels)`) or 'channels_first' (inputs shaped
      `(batch, channels, length)`).
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    # Delegate to the shared 1D pooling machinery with the max-pool op.
    super(MaxPooling1D, self).__init__(
        pool_function=nn.max_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name,
        **kwargs)
def max_pooling1d(inputs, pool_size, strides,
                  padding='valid', data_format='channels_last',
                  name=None):
  """Functional interface for 1D max pooling.

  Arguments:
    inputs: The tensor over which to pool. Must have rank 3.
    pool_size: An integer or tuple/list of a single integer giving the
      size of the pooling window.
    strides: An integer or tuple/list of a single integer giving the
      stride of the pooling operation.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: A string, 'channels_last' (default, inputs shaped
      `(batch, length, channels)`) or 'channels_first' (inputs shaped
      `(batch, channels, length)`).
    name: A string, the name of the layer.

  Returns:
    The output tensor, of rank 3.

  Raises:
    ValueError: if eager execution is enabled.
  """
  return MaxPooling1D(pool_size=pool_size,
                      strides=strides,
                      padding=padding,
                      data_format=data_format,
                      name=name).apply(inputs)
class _Pooling2D(base.Layer):
  """Shared implementation for 2D pooling layers (internal only).

  This class only exists for code reuse; concrete subclasses supply the
  pooling op. It will never be an exposed API.

  Arguments:
    pool_function: The pooling op to apply, e.g. `tf.nn.max_pool`.
    pool_size: An integer or tuple/list of 2 integers
      `(pool_height, pool_width)`; a single integer applies to both
      spatial dimensions.
    strides: An integer or tuple/list of 2 integers giving the pooling
      strides; a single integer applies to both spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: A string, 'channels_last' (default, inputs shaped
      `(batch, height, width, channels)`) or 'channels_first' (inputs
      shaped `(batch, channels, height, width)`).
    name: A string, the name of the layer.
  """

  def __init__(self, pool_function, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    super(_Pooling2D, self).__init__(name=name, **kwargs)
    self.pool_function = pool_function
    self.pool_size = utils.normalize_tuple(pool_size, 2, 'pool_size')
    self.strides = utils.normalize_tuple(strides, 2, 'strides')
    self.padding = utils.normalize_padding(padding)
    self.data_format = utils.normalize_data_format(data_format)
    self.input_spec = base.InputSpec(ndim=4)

  def call(self, inputs):
    # Assemble 4D window/stride vectors with the batch and channel
    # dimensions fixed at 1, positioned per the data format.
    if self.data_format == 'channels_last':
      window = (1,) + self.pool_size + (1,)
      step = (1,) + self.strides + (1,)
    else:
      window = (1, 1) + self.pool_size
      step = (1, 1) + self.strides
    return self.pool_function(
        inputs,
        ksize=window,
        strides=step,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, 4))

  def compute_output_shape(self, input_shape):
    shape = tensor_shape.TensorShape(input_shape).as_list()
    # Spatial axes depend on the data format.
    if self.data_format == 'channels_first':
      row_axis, col_axis = 2, 3
    else:
      row_axis, col_axis = 1, 2
    rows = utils.conv_output_length(shape[row_axis], self.pool_size[0],
                                    self.padding, self.strides[0])
    cols = utils.conv_output_length(shape[col_axis], self.pool_size[1],
                                    self.padding, self.strides[1])
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape([shape[0], shape[1], rows, cols])
    return tensor_shape.TensorShape([shape[0], rows, cols, shape[3]])
class AveragePooling2D(_Pooling2D):
  """Average pooling layer for 2D inputs such as images.

  Arguments:
    pool_size: An integer or tuple/list of 2 integers
      `(pool_height, pool_width)`; a single integer applies to both
      spatial dimensions.
    strides: An integer or tuple/list of 2 integers giving the pooling
      strides; a single integer applies to both spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: A string, 'channels_last' (default, inputs shaped
      `(batch, height, width, channels)`) or 'channels_first' (inputs
      shaped `(batch, channels, height, width)`).
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    # Delegate to the shared 2D pooling machinery with the average-pool op.
    super(AveragePooling2D, self).__init__(
        pool_function=nn.avg_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name,
        **kwargs)
def average_pooling2d(inputs,
                      pool_size, strides,
                      padding='valid', data_format='channels_last',
                      name=None):
  """Functional interface for 2D average pooling.

  Arguments:
    inputs: The tensor over which to pool. Must have rank 4.
    pool_size: An integer or tuple/list of 2 integers
      `(pool_height, pool_width)`; a single integer applies to both
      spatial dimensions.
    strides: An integer or tuple/list of 2 integers giving the pooling
      strides; a single integer applies to both spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: A string, 'channels_last' (default, inputs shaped
      `(batch, height, width, channels)`) or 'channels_first' (inputs
      shaped `(batch, channels, height, width)`).
    name: A string, the name of the layer.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  return AveragePooling2D(pool_size=pool_size, strides=strides,
                          padding=padding, data_format=data_format,
                          name=name).apply(inputs)
class MaxPooling2D(_Pooling2D):
  """Max pooling layer for 2D inputs (e.g. images).

  Arguments:
    pool_size: An integer or tuple/list of 2 integers
      (pool_height, pool_width) giving the pooling window size; a single
      integer applies to both spatial dimensions.
    strides: An integer or tuple/list of 2 integers giving the pooling
      strides; a single integer applies to both spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' (default, inputs shaped
      `(batch, height, width, channels)`) or 'channels_first' (inputs
      shaped `(batch, channels, height, width)`).
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    # Delegate to the shared 2D pooling base, with `nn.max_pool` as the
    # pooling function.
    super(MaxPooling2D, self).__init__(
        nn.max_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name,
        **kwargs)
def max_pooling2d(inputs,
                  pool_size, strides,
                  padding='valid', data_format='channels_last',
                  name=None):
  """Functional interface for max pooling over 2D inputs (e.g. images).

  Arguments:
    inputs: The tensor over which to pool. Must have rank 4.
    pool_size: An integer or tuple/list of 2 integers
      (pool_height, pool_width) giving the pooling window size; a single
      integer applies to both spatial dimensions.
    strides: An integer or tuple/list of 2 integers giving the pooling
      strides; a single integer applies to both spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' (default, inputs shaped
      `(batch, height, width, channels)`) or 'channels_first' (inputs
      shaped `(batch, channels, height, width)`).
    name: A string, the name of the layer.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  pooling_layer = MaxPooling2D(
      pool_size=pool_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
  return pooling_layer.apply(inputs)
class _Pooling3D(base.Layer):
  """Pooling layer for arbitrary pooling functions, for 3D inputs.

  Internal code-reuse base class; never exposed as a public API.

  Arguments:
    pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
    pool_size: An integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width); a single integer applies to
      all three spatial dimensions.
    strides: An integer or tuple/list of 3 integers; a single integer
      applies to all three spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' (default, inputs shaped
      `(batch, depth, height, width, channels)`) or 'channels_first'
      (inputs shaped `(batch, channels, depth, height, width)`).
    name: A string, the name of the layer.
  """

  def __init__(self, pool_function, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    super(_Pooling3D, self).__init__(name=name, **kwargs)
    self.pool_function = pool_function
    self.pool_size = utils.normalize_tuple(pool_size, 3, 'pool_size')
    self.strides = utils.normalize_tuple(strides, 3, 'strides')
    self.padding = utils.normalize_padding(padding)
    self.data_format = utils.normalize_data_format(data_format)
    self.input_spec = base.InputSpec(ndim=5)

  def call(self, inputs):
    ksize = (1,) + self.pool_size + (1,)
    strides = (1,) + self.strides + (1,)
    channels_first = self.data_format == 'channels_first'
    if channels_first:
      # TF does not support `channels_first` with 3D pooling operations,
      # so move channels to the last axis, pool, then move them back.
      # TODO(fchollet): remove this when TF pooling is feature-complete.
      inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))
    outputs = self.pool_function(
        inputs,
        ksize=ksize,
        strides=strides,
        padding=self.padding.upper())
    if channels_first:
      outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
    return outputs

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # Pick out the three spatial dimensions for the active layout.
    if self.data_format == 'channels_first':
      spatial = input_shape[2:5]
    else:
      spatial = input_shape[1:4]
    pooled = [
        utils.conv_output_length(dim, self.pool_size[i], self.padding,
                                 self.strides[i])
        for i, dim in enumerate(spatial)
    ]
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1]] + pooled)
    else:
      return tensor_shape.TensorShape(
          [input_shape[0]] + pooled + [input_shape[4]])
class AveragePooling3D(_Pooling3D):
  """Average pooling layer for 3D inputs (e.g. volumes).

  Arguments:
    pool_size: An integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width); a single integer applies to
      all three spatial dimensions.
    strides: An integer or tuple/list of 3 integers; a single integer
      applies to all three spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' (default, inputs shaped
      `(batch, depth, height, width, channels)`) or 'channels_first'
      (inputs shaped `(batch, channels, depth, height, width)`).
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    # Delegate to the shared 3D pooling base, with `nn.avg_pool3d` as the
    # pooling function.
    super(AveragePooling3D, self).__init__(
        nn.avg_pool3d,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name,
        **kwargs)
def average_pooling3d(inputs,
                      pool_size, strides,
                      padding='valid', data_format='channels_last',
                      name=None):
  """Functional interface for average pooling over 3D inputs (e.g. volumes).

  Arguments:
    inputs: The tensor over which to pool. Must have rank 5.
    pool_size: An integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width); a single integer applies to
      all three spatial dimensions.
    strides: An integer or tuple/list of 3 integers; a single integer
      applies to all three spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' (default, inputs shaped
      `(batch, depth, height, width, channels)`) or 'channels_first'
      (inputs shaped `(batch, channels, depth, height, width)`).
    name: A string, the name of the layer.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  pooling_layer = AveragePooling3D(
      pool_size=pool_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
  return pooling_layer.apply(inputs)
class MaxPooling3D(_Pooling3D):
  """Max pooling layer for 3D inputs (e.g. volumes).

  Arguments:
    pool_size: An integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width); a single integer applies to
      all three spatial dimensions.
    strides: An integer or tuple/list of 3 integers; a single integer
      applies to all three spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' (default, inputs shaped
      `(batch, depth, height, width, channels)`) or 'channels_first'
      (inputs shaped `(batch, channels, depth, height, width)`).
    name: A string, the name of the layer.
  """

  def __init__(self, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    # Delegate to the shared 3D pooling base, with `nn.max_pool3d` as the
    # pooling function.
    super(MaxPooling3D, self).__init__(
        nn.max_pool3d,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name,
        **kwargs)
def max_pooling3d(inputs,
                  pool_size, strides,
                  padding='valid', data_format='channels_last',
                  name=None):
  """Functional interface for max pooling over 3D inputs (e.g. volumes).

  Arguments:
    inputs: The tensor over which to pool. Must have rank 5.
    pool_size: An integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width); a single integer applies to
      all three spatial dimensions.
    strides: An integer or tuple/list of 3 integers; a single integer
      applies to all three spatial dimensions.
    padding: A string, 'valid' or 'same' (case-insensitive).
    data_format: 'channels_last' (default, inputs shaped
      `(batch, depth, height, width, channels)`) or 'channels_first'
      (inputs shaped `(batch, channels, depth, height, width)`).
    name: A string, the name of the layer.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  pooling_layer = MaxPooling3D(
      pool_size=pool_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
  return pooling_layer.apply(inputs)
# Aliases
# Shorter spellings mirroring the primary class/function names, kept so
# user code can import either form.
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
max_pool2d = max_pooling2d
avg_pool2d = average_pooling2d
| 39.97901 | 80 | 0.671679 |
7b6c43f844d78a853b052957e1661e7928a642eb | 7,436 | py | Python | mamba/core/rmap_utils/rmap_common.py | ismaelJimenez/mamba_server | e6e2343291a0df24f226bde0d13e5bfa74cc3650 | [
"MIT"
] | null | null | null | mamba/core/rmap_utils/rmap_common.py | ismaelJimenez/mamba_server | e6e2343291a0df24f226bde0d13e5bfa74cc3650 | [
"MIT"
] | null | null | null | mamba/core/rmap_utils/rmap_common.py | ismaelJimenez/mamba_server | e6e2343291a0df24f226bde0d13e5bfa74cc3650 | [
"MIT"
] | null | null | null | ############################################################################
#
# Copyright (c) Mamba Developers. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
############################################################################
import struct
from mamba.core.exceptions import ComponentConfigException
from mamba.core.rmap_utils.crc_8 import crc_8
def get_rmap_cmd_code(write: bool, verify: bool, reply: bool, inc: bool) -> int:
    """Compose the RMAP instruction byte for a command packet.

    Bit layout: bits 7-6 packet type (0b01 = command), bit 5 write,
    bit 4 verify, bit 3 reply, bit 2 increment.
    """
    return (0x40            # command packet type (0b01 in bits 7-6)
            | write * 0x20
            | verify * 0x10
            | reply * 0x8
            | inc * 0x4)
class RMAP:
    """Builder for RMAP (Remote Memory Access Protocol) command packets.

    Logical addresses and the authorisation key come from ``rmap_config``;
    a per-instance transaction identifier is incremented on every command
    and wraps back to 1 after 65535.
    """

    def __init__(self, rmap_config: dict) -> None:
        # 0xFE is used as the default for both logical addresses.
        self.target_logical_address = rmap_config.get('target_logical_address',
                                                      0xFE)
        self.protocol_identifier = 1  # RMAP protocol identifier
        key = rmap_config.get('key')
        if isinstance(key, int):
            self.key = key  # reuse the value already fetched above
        else:
            raise ComponentConfigException(
                "Missing Key in component configuration")
        self.initiator_logical_address = rmap_config.get(
            'initiator_logical_address', 0xFE)
        self.transaction_id = 1

    def get_rmap_cmd(self, write: bool, verify: bool, reply: bool, inc: bool,
                     address: int, size: int, data_hex_str: str,
                     extended_addr: int) -> bytes:
        """Serialize one RMAP command packet.

        :param write: write (True) or read (False) command flag.
        :param verify: verify-before-write flag.
        :param reply: reply-requested flag.
        :param inc: incrementing-address flag.
        :param address: 32-bit start address, serialized big-endian.
        :param size: 24-bit data length; when 0 and a payload is given,
            derived from ``data_hex_str`` (two hex chars per byte).
        :param data_hex_str: payload as a hex string ('' for no payload).
        :param extended_addr: extended address byte.
        :return: header + header CRC, followed by payload + payload CRC
            when a payload is present.
        """
        rmap_cmd_code = get_rmap_cmd_code(write, verify, reply, inc)

        if size == 0 and len(data_hex_str) > 0:
            # Two hex characters encode one payload byte.
            size = int(len(data_hex_str) / 2)

        rmap_header = struct.pack(
            "BBBBBBBBBBBBBBB", self.target_logical_address,
            self.protocol_identifier, rmap_cmd_code, self.key,
            self.initiator_logical_address, self.transaction_id >> 8,
            self.transaction_id & 0xff, extended_addr, (address >> 24) & 0xff,
            (address >> 16) & 0xff, (address >> 8) & 0xff,
            (address >> 0) & 0xff, (size >> 16) & 0xff, (size >> 8) & 0xff,
            (size >> 0) & 0xff)

        msg = rmap_header + crc_8(rmap_header)

        if len(data_hex_str) > 0:
            msg = msg + bytes.fromhex(data_hex_str) + crc_8(
                bytes.fromhex(data_hex_str))

        self.transaction_id += 1
        if self.transaction_id > 65535:
            # 16-bit field: wrap around (0 is skipped, matching the
            # initial value).
            self.transaction_id = 1

        return msg
def rmap_bytes_to_dict(rmap_bytes_msg):
    """Decode a raw RMAP packet into a dict of named fields.

    Four layouts are handled, selected from the instruction byte: write
    command, read command, write reply and read reply. CRC bytes are
    checked with crc_8 and reported through ``*_crc_valid`` flags.

    :param rmap_bytes_msg: the packet as bytes.
    :return: dict of decoded fields, or None when the packet is shorter
        than the minimal 8-byte header.
    """
    if len(rmap_bytes_msg) < 8:
        return None

    instruction = rmap_bytes_msg[2]
    packet_type = instruction >> 6      # bits 7-6: 0b01 command, 0b00 reply
    write = (instruction & 0x3F) >> 5   # bit 5: write (1) / read (0)

    # Instruction flags common to all four packet layouts.
    flag_fields = {
        'packet_type': packet_type,
        'cmd_write': write,
        'cmd_verify': (instruction & 0x1F) >> 4,
        'cmd_reply': (instruction & 0xF) >> 3,
        'cmd_incr': (instruction & 0x7) >> 2,
    }
    transaction_id = int.from_bytes(rmap_bytes_msg[5:7], 'big')

    if packet_type == 1:
        # Command packet: 16-byte header; write commands additionally carry
        # a payload followed by one trailing data-CRC byte.
        result = {
            'target_logical_address': rmap_bytes_msg[0],
            'protocol_identifier': rmap_bytes_msg[1],
            **flag_fields,
            'key': rmap_bytes_msg[3],
            'initiator_logical_address': rmap_bytes_msg[4],
            'transaction_id': transaction_id,
            'extended_addr': rmap_bytes_msg[7],
            'address': int.from_bytes(rmap_bytes_msg[8:12], 'big'),
            'data_length': int.from_bytes(rmap_bytes_msg[12:15], 'big'),
        }
        if write == 1:
            # 17 = 16 header bytes + 1 trailing data-CRC byte.
            result['data_length_valid'] = (
                result['data_length'] == len(rmap_bytes_msg) - 17)
        result['header_crc'] = bytes([rmap_bytes_msg[15]])
        result['header_crc_valid'] = (
            crc_8(rmap_bytes_msg[0:15]) == bytes([rmap_bytes_msg[15]]))
        if write == 1:
            result['data'] = rmap_bytes_msg[16:-1]
            result['data_crc'] = bytes([rmap_bytes_msg[-1]])
            result['data_crc_valid'] = (
                crc_8(rmap_bytes_msg[16:-1]) == bytes([rmap_bytes_msg[-1]]))
        return result

    if write == 1:
        # Write reply: 8-byte header only.
        return {
            'initiator_logical_address': rmap_bytes_msg[0],
            'protocol_identifier': rmap_bytes_msg[1],
            **flag_fields,
            'status': rmap_bytes_msg[3],
            'target_logical_address': rmap_bytes_msg[4],
            'transaction_id': transaction_id,
            'header_crc': bytes([rmap_bytes_msg[7]]),
            'header_crc_valid':
                crc_8(rmap_bytes_msg[0:7]) == bytes([rmap_bytes_msg[7]]),
        }

    # Read reply: 12-byte header followed by payload and a data-CRC byte.
    return {
        'initiator_logical_address': rmap_bytes_msg[0],
        'protocol_identifier': rmap_bytes_msg[1],
        **flag_fields,
        'status': rmap_bytes_msg[3],
        'target_logical_address': rmap_bytes_msg[4],
        'transaction_id': transaction_id,
        'reserved': rmap_bytes_msg[7],
        'data_length': int.from_bytes(rmap_bytes_msg[8:11], 'big'),
        'header_crc': bytes([rmap_bytes_msg[11]]),
        'header_crc_valid':
            crc_8(rmap_bytes_msg[0:11]) == bytes([rmap_bytes_msg[11]]),
        'data': rmap_bytes_msg[12:-1],
        'data_crc': bytes([rmap_bytes_msg[-1]]),
        'data_crc_valid':
            crc_8(rmap_bytes_msg[12:-1]) == bytes([rmap_bytes_msg[-1]]),
    }
| 35.578947 | 79 | 0.500134 |
08f21bf72d0723b8c7469e1ab477aeb0ab3c4664 | 255 | py | Python | bot_save_last_price_zaif.py | ysawa/zaif-trade-bot | 4e5e62d2d03fff773b018ff960e2d9a0587c9a6e | [
"MIT"
] | 1 | 2018-02-01T06:24:49.000Z | 2018-02-01T06:24:49.000Z | bot_save_last_price_zaif.py | ysawa/zaif-trade-bot | 4e5e62d2d03fff773b018ff960e2d9a0587c9a6e | [
"MIT"
] | 5 | 2020-03-24T15:26:43.000Z | 2021-04-30T20:34:03.000Z | bot_save_last_price_zaif.py | ysawa/coin-trader | 4e5e62d2d03fff773b018ff960e2d9a0587c9a6e | [
"MIT"
] | null | null | null | import os
from base.save_last_price import save_last_price
from zaif.api import ZaifApi
DIRECTORY = os.path.join("data", "last_price", "zaif")
def main():
    """Run save_last_price against a fresh ZaifApi client, writing under DIRECTORY."""
    save_last_price(ZaifApi(), DIRECTORY)
if __name__ == '__main__':
main()
| 15.9375 | 54 | 0.701961 |
0b294c419235db7de95f4cafeb3256dfee520ef4 | 174 | py | Python | app/__init__.py | devdazed/django-docker-template | 520968ac5cd54070885de41f0e725c310f1cf380 | [
"MIT"
] | null | null | null | app/__init__.py | devdazed/django-docker-template | 520968ac5cd54070885de41f0e725c310f1cf380 | [
"MIT"
] | null | null | null | app/__init__.py | devdazed/django-docker-template | 520968ac5cd54070885de41f0e725c310f1cf380 | [
"MIT"
] | null | null | null |
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
__all__ = ('celery_app',) | 29 | 54 | 0.764368 |
e388e1b9dbaacf498d8f1bbc86df30b03258f3e7 | 8,312 | py | Python | src/nautilus_scripts/src/subway_car/subway_car.py | ungjus/nautilus_surface | 5dbce89f3f7ad4b614e7c3aa7d57394d301adeac | [
"MIT"
] | 4 | 2020-11-29T02:23:13.000Z | 2021-02-24T20:44:38.000Z | src/nautilus_scripts/src/subway_car/subway_car.py | ungjus/nautilus_surface | 5dbce89f3f7ad4b614e7c3aa7d57394d301adeac | [
"MIT"
] | 3 | 2020-10-22T02:10:04.000Z | 2021-02-14T02:46:21.000Z | src/nautilus_scripts/src/subway_car/subway_car.py | laurenk104/in_house_team_uw | 59cb385688a713b0b49c069f2ba81351b4cfaf42 | [
"MIT"
] | 4 | 2021-11-06T21:28:26.000Z | 2021-12-28T03:06:40.000Z | #!/usr/bin/env python3
import cv2
import numpy as np
import imutils
RECTANGLE_THRESHOLD = 150  # default binary threshold (getRectangleImage recomputes a local value per frame)
CENTER_THRESHOLD_VARIANCE = 20  # subtracted from the centre-pixel brightness when thresholding
PIXELS_PER_CM = 1  # pixels per centimetre in the captured face images
LENGTH = 110  # face dimensions -- presumably centimetres of the subway car model; TODO confirm units
HEIGHT = 50
WIDTH = 50
DISPLAY_IMAGE_SCALE = 1  # scale factor applied to the on-screen thumbnail sizes
IMAGE_CORNER = (60, 60)  # top-left (x, y) where thumbnails are drawn in the preview
captureButtonLastState = False  # previous-frame state of the 'c' key, for edge detection
captureButtonCurrentState = False  # current-frame state of the 'c' key
captureButtonDeleteState = False  # set while 'b' (delete last capture) is pressed
def resizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize *image* to the requested width OR height, keeping aspect ratio.

    If both width and height are None the image is returned unchanged; when
    both are given, width takes precedence (height is only used when width
    is None).
    """
    h, w = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        scale = height / float(h)
        new_size = (int(w * scale), height)
    else:
        scale = width / float(w)
        new_size = (width, int(h * scale))
    return cv2.resize(image, new_size, interpolation=inter)
def getRectangleImage(img, size):
    """ Converts an image to a cropped, translated image of a rectangle
    :param img: The original image (in color)
    :param size: Output size tuple passed through to transformImage
    :return: A transformed image cropped to the rectangle (or None if none found)
    :return: Points defining the rectangle found
    """
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rows, columns, channels = img.shape
    center_pixel = img[rows // 2, columns // 2]

    # Derive the binary threshold from the brightness of the centre pixel:
    # mean of the three colour channels minus CENTER_THRESHOLD_VARIANCE.
    # Each channel is cast to int so summing uint8 values cannot wrap, and
    # a local name is used instead of shadowing the builtin `sum` and the
    # module-level RECTANGLE_THRESHOLD constant.
    brightness = 0
    for i in range(3):
        brightness += int(center_pixel[i])
    threshold = (brightness // 3) - CENTER_THRESHOLD_VARIANCE

    ret, binary_img = cv2.threshold(gray_img, threshold, 255, cv2.THRESH_BINARY)
    points = getRectangle(binary_img)

    if points is None:
        return None, None

    transformed_img = transformImage(img.copy(), size, points)
    return transformed_img, points
def getRectangle(binary_img):
    """Locate the largest four-sided contour in a binary image.

    :param binary_img: black and white (thresholded) image
    :returns: the corner points array when the largest contour approximates
        a quadrilateral, otherwise None
    """
    # cv2.RETR_LIST keeps the contour list flat (no nesting hierarchy).
    found = cv2.findContours(binary_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(found)
    if len(contours) == 0:
        return None
    # Largest contour by area (no need to sort the whole list).
    largest_contour = max(contours, key=cv2.contourArea)
    epsilon = 0.1 * cv2.arcLength(largest_contour, True)
    approx = cv2.approxPolyDP(largest_contour, epsilon, True)
    return approx if len(approx) == 4 else None
def transformImage(image, size, points):
    """ Takes in the points of a rectangle and transforms and crops the image to it.
    :param image: The colored, original image.
    :param size: The size of the output image as an array [width, height]
    :param points: The array of points representing the corners of the rectangle.
    :return: A new image
    """
    """ How to get points:
    points[0-3][0][0-1]
           ^ four different points
                   ^ 0 is y and 1 is x
    b =               0    1      0    1      0    1      0    1
    point[a][0][b] = [[279, 274], [279, 427], [440, 427], [440, 274]]
    a =                0            1           2           3
    """
    # NOTE(review): sorted() returns a NEW list and its result is discarded
    # here, so `points` keeps the order produced by approxPolyDP. Assigning
    # the result would change the left/right corner pairing below -- confirm
    # the original intent before "fixing" this line.
    sorted(points, key=lambda point: point[0][1], reverse=True)
    # get left side points
    if(points[0][0][0] < points[1][0][0]):
        topleft = points[0][0]
        bottomleft = points[1][0]
    else:
        topleft = points[1][0]
        bottomleft = points[0][0]
    # get right side points
    if(points[2][0][0] < points[3][0][0]):
        topright = points[2][0]
        bottomright = points[3][0]
    else:
        topright = points[3][0]
        bottomright = points[2][0]
    # print(points)
    quadPoints = np.array([topleft, topright, bottomleft, bottomright], dtype="float32")
    # NOTE(review): `height` is taken from size[0] and `width` from size[1],
    # the reverse of the (width, height) convention of the callers' size
    # tuples -- verify which orientation is intended.
    height = size[0]
    width = size[1]
    finalPoints = np.array([
        [0, 0],
        [height, 0],
        [0, width],
        [height, width]],
        dtype="float32")
    transform = cv2.getPerspectiveTransform(quadPoints, finalPoints)  # Get matrix
    output = cv2.warpPerspective(image, transform, size)  # Transform image
    return output
def scaleTupleArray(array, scale):
    """Return a new list where new[i] = array[i] * scale, with each of the
    first two coordinates multiplied by *scale* and truncated to int."""
    return [(int(pt[0] * scale), int(pt[1] * scale)) for pt in array]
def addTupleX(tup1, tup2):
    """Return the sum of the x (first) components of two tuples.

    Note: despite the name suggesting tuple arithmetic, the result is a
    scalar (the original docstring incorrectly said "a new tuple").
    """
    return tup1[0] + tup2[0]
if __name__ == '__main__':
    # image = cv2.imread("images/test3.JPG")
    # Expected pixel sizes of the five faces to capture, in capture order:
    # end, side, end, side, top.
    end = (WIDTH * PIXELS_PER_CM, HEIGHT * PIXELS_PER_CM)
    side = (LENGTH * PIXELS_PER_CM, HEIGHT * PIXELS_PER_CM)
    top = (LENGTH * PIXELS_PER_CM, WIDTH * PIXELS_PER_CM)
    sizes = [end, side, end, side, top]
    imageSizes = scaleTupleArray(sizes, DISPLAY_IMAGE_SCALE)
    # Screen layout: one thumbnail slot per face, laid out from IMAGE_CORNER.
    x_0, y_0 = IMAGE_CORNER
    img_width, img_height = imageSizes[0]
    img_length, _ = imageSizes[1]
    coords = [
        (x_0, y_0),
        (x_0 + img_width, y_0),
        (x_0 + img_width + img_length, y_0),
        (x_0 + (2 * img_width) + img_length, y_0),
        (x_0 + img_width, y_0 - img_height)
    ]
    imageIndex = 0             # which face is being captured next
    outputImages = [None] * 5  # captured (warped) face images
    cap = cv2.VideoCapture(1)
    if(cap.isOpened()):
        print("Opened camera.")
        filename = 'output.jpg'
        # White canvas that captured faces are stitched onto.
        output = np.zeros((125, 450, 3), np.uint8)
        for x in range(125):
            for y in range(450):
                output[x, y] = [255, 255, 255]
        while(imageIndex < len(sizes)):
            # Capture frame-by-frame
            ret, image = cap.read()
            image = resizeWithAspectRatio(image, width=800)
            warpedImage, points = getRectangleImage(image.copy(), sizes[imageIndex])
            # Keyboard handling: 'q' quits, 'c' captures, 'b' deletes the
            # last capture; any other key clears the button states.
            k = cv2.waitKey(1) & 0xFF
            if k == ord('q'):
                cv2.destroyAllWindows()
                break
            elif k == ord('c'):
                captureButtonCurrentState = True
            elif k == ord('b'):
                captureButtonDeleteState = True
            else:
                captureButtonCurrentState = False
                captureButtonDeleteState = False
            # Overlay thumbnails of the faces captured so far.
            for i in range(imageIndex):  # from 0 to imageIndex, exclusive [0, imageIndex)
                width, height = imageSizes[i]
                x, y = coords[i]
                resized_img = cv2.resize(outputImages[i].copy(), (width, height))
                print("x: {}, y: {}, width: {}, height: {}".format(x, y, width, height))
                print(resized_img.shape)
                image[y: y + height, x: x + width] = resized_img
            if (imageIndex != len(imageSizes)) and points is not None and warpedImage is not None:
                # Rectangle detected: outline it and preview the warped face.
                cv2.drawContours(image, [points], -1, (0, 0, 255), 4)
                width, height = imageSizes[imageIndex]
                x, y = coords[imageIndex]
                shrink_img = cv2.resize(warpedImage.copy(), (width, height))
                image[y:y + height, x:x + width] = shrink_img
                output[y:y + height, x:x + width] = shrink_img
                # Rising edge of 'c' stores the capture; 'b' steps back one.
                if (captureButtonCurrentState and not captureButtonLastState):
                    # time to capture!!!
                    outputImages[imageIndex] = warpedImage
                    print("Captured image {}.".format(imageIndex + 1))
                    imageIndex += 1
                elif (captureButtonDeleteState):
                    if (imageIndex > 0):
                        imageIndex -= 1
            cv2.imshow("Camera View", image)
            captureButtonLastState = captureButtonCurrentState
        # Enlarge the stitched canvas and write the final result to disk.
        scale_percent = 300  # percent of original size
        width = int(output.shape[1] * scale_percent / 100)
        height = int(output.shape[0] * scale_percent / 100)
        dim = (width, height)
        resized = cv2.resize(output.copy(), dim)
        cv2.imwrite(filename, resized)
    else:
        print("Failed to get camera.")
74dcbd671d38f94e9eeedb8f811df847523d0a49 | 793 | py | Python | gencove/command/basespace/autoimports/autoimport_list/cli.py | gncv/gencove-cli | b4bcbe5b6a1506544472542af8b2384d21c7cbe4 | [
"Apache-2.0"
] | 1 | 2020-04-28T06:31:53.000Z | 2020-04-28T06:31:53.000Z | gencove/command/basespace/autoimports/autoimport_list/cli.py | gncv/gencove-cli | b4bcbe5b6a1506544472542af8b2384d21c7cbe4 | [
"Apache-2.0"
] | null | null | null | gencove/command/basespace/autoimports/autoimport_list/cli.py | gncv/gencove-cli | b4bcbe5b6a1506544472542af8b2384d21c7cbe4 | [
"Apache-2.0"
] | 1 | 2021-07-29T08:24:51.000Z | 2021-07-29T08:24:51.000Z | """Project autoimport Biosamples from BaseSpace projects shell command
definition.
"""
import click
from gencove.command.common_cli_options import add_options, common_options
from gencove.constants import Credentials, Optionals
from .main import BaseSpaceAutoImportList
@click.command("list")
@add_options(common_options)
def autoimport_list(host, email, password, api_key):
    """Lists periodic import of BaseSpace projects (their Biosamples) jobs.
    Examples:
        List automatic import jobs of BaseSpace projects:
            gencove basespace autoimports list
    """  # noqa: E501
    # Assemble the command dependencies, then run it.
    credentials = Credentials(email=email, password=password, api_key=api_key)
    options = Optionals(host=host)
    BaseSpaceAutoImportList(credentials, options).run()
| 24.030303 | 75 | 0.728878 |
08f33346f014e3326af6bb0d6b438bca8b50f0ec | 1,313 | py | Python | frappe/integrations/doctype/slack_webhook_url/slack_webhook_url.py | ektai/erp2Dodock | 5ad64b01cba9b07437f9a27751101258679379e8 | [
"MIT"
] | null | null | null | frappe/integrations/doctype/slack_webhook_url/slack_webhook_url.py | ektai/erp2Dodock | 5ad64b01cba9b07437f9a27751101258679379e8 | [
"MIT"
] | null | null | null | frappe/integrations/doctype/slack_webhook_url/slack_webhook_url.py | ektai/erp2Dodock | 5ad64b01cba9b07437f9a27751101258679379e8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import get_url_to_form
from frappe import _
import requests
import json
# Human-readable diagnostics for the HTTP status codes returned by Slack's
# webhook endpoint on failure; used when logging in send_slack_message.
error_messages = {
	400: "400: Invalid Payload or User not found",
	403: "403: Action Prohibited",
	404: "404: Channel not found",
	410: "410: The Channel is Archived",
	500: "500: Rollup Error, Slack seems to be down"
}
# DocType model holding a stored Slack incoming-webhook URL; all behavior
# comes from frappe's Document base class.
class SlackWebhookURL(Document):
	pass
def send_slack_message(webhook_url, message, reference_doctype, reference_name):
	"""Post *message* to a configured Slack webhook, with a button linking
	back to the referenced document.

	:param webhook_url: name of the Slack Webhook URL doc holding the endpoint
	:param message: text body of the Slack message
	:param reference_doctype: DocType of the linked document
	:param reference_name: name of the linked document
	:returns: 'success' on an OK response, 'error' otherwise (failures are
		logged via frappe.log_error instead of raising)
	"""
	slack_url = frappe.db.get_value("Slack Webhook URL", webhook_url, "webhook_url")
	doc_url = get_url_to_form(reference_doctype, reference_name)
	attachments = [
		{
			"fallback": _("See the document at {0}").format(doc_url),
			"actions": [
				{
					"type": "button",
					"text": _("Go to the document"),
					"url": doc_url,
					"style": "primary"
				}
			]
		}
	]

	data = {"text": message, "attachments": attachments}
	try:
		# Bound the request so a hung Slack endpoint cannot stall the caller;
		# network failures are reported as 'error' instead of propagating.
		r = requests.post(slack_url, data=json.dumps(data), timeout=10)
	except requests.exceptions.RequestException as e:
		frappe.log_error(str(e), _('Slack Webhook Error'))
		return 'error'

	if not r.ok:
		message = error_messages.get(r.status_code, r.status_code)
		frappe.log_error(message, _('Slack Webhook Error'))
		return 'error'

	return 'success'
| 25.745098 | 81 | 0.711348 |
ca7b5d3d9ebbd98013ee22a0c3245521c2600644 | 96 | py | Python | mamba/_version.py | dmaljovec/mamba | 599c2820086b5d69df8c73a98b2a4acf5dffe0df | [
"BSD-3-Clause"
] | null | null | null | mamba/_version.py | dmaljovec/mamba | 599c2820086b5d69df8c73a98b2a4acf5dffe0df | [
"BSD-3-Clause"
] | null | null | null | mamba/_version.py | dmaljovec/mamba | 599c2820086b5d69df8c73a98b2a4acf5dffe0df | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Single source of truth for the package version; bump the tuple here.
version_info = (0, 1, 1)
# Canonical dotted version string derived from version_info.
__version__ = '.'.join(map(str, version_info))
| 24 | 46 | 0.604167 |
bf45d6597a1778083f07488e9155ad0de5bfc396 | 2,982 | py | Python | setup.py | wzhaojin/thingsboard-gateway | 9f0253b66b135a9080de1c14e029024c4d010234 | [
"Apache-2.0"
] | null | null | null | setup.py | wzhaojin/thingsboard-gateway | 9f0253b66b135a9080de1c14e029024c4d010234 | [
"Apache-2.0"
] | null | null | null | setup.py | wzhaojin/thingsboard-gateway | 9f0253b66b135a9080de1c14e029024c4d010234 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from os import path
# Resolve README.md relative to this file so builds work from any cwd.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Release number; also interpolated into download_url below.
VERSION = "2.4.1"

setup(
    version=VERSION,
    name="thingsboard-gateway",
    author="ThingsBoard",
    author_email="info@thingsboard.io",
    license="Apache Software License (Apache Software License 2.0)",
    description="Thingsboard Gateway for IoT devices.",
    url="https://github.com/thingsboard/thingsboard-gateway",
    long_description=long_description,
    long_description_content_type="text/markdown",
    include_package_data=True,
    python_requires=">=3.5",
    # Explicit package list (no find_packages): new connector/extension
    # subpackages must be added here by hand.
    packages=['thingsboard_gateway', 'thingsboard_gateway.gateway', 'thingsboard_gateway.storage',
              'thingsboard_gateway.tb_client', 'thingsboard_gateway.connectors', 'thingsboard_gateway.connectors.ble',
              'thingsboard_gateway.connectors.mqtt', 'thingsboard_gateway.connectors.opcua', 'thingsboard_gateway.connectors.request',
              'thingsboard_gateway.connectors.modbus', 'thingsboard_gateway.connectors.can', 'thingsboard_gateway.connectors.bacnet',
              'thingsboard_gateway.connectors.bacnet.bacnet_utilities', 'thingsboard_gateway.connectors.odbc',
              'thingsboard_gateway.tb_utility', 'thingsboard_gateway.extensions',
              'thingsboard_gateway.extensions.mqtt', 'thingsboard_gateway.extensions.modbus', 'thingsboard_gateway.extensions.opcua',
              'thingsboard_gateway.extensions.ble', 'thingsboard_gateway.extensions.serial', 'thingsboard_gateway.extensions.request',
              'thingsboard_gateway.extensions.can', 'thingsboard_gateway.extensions.bacnet', 'thingsboard_gateway.extensions.odbc'
              ],
    install_requires=[
        'cffi',
        'jsonpath-rw',
        'regex',
        'pip',
        'jsonschema==3.1.1',
        'lxml',
        'paho-mqtt',
        'pyserial',
        'pytz',
        'PyYAML',
        'simplejson',
        'requests'
    ],
    download_url='https://github.com/thingsboard/thingsboard-gateway/archive/%s.tar.gz' % VERSION,
    # Installs the `thingsboard-gateway` console command.
    entry_points={
        'console_scripts': [
            'thingsboard-gateway = thingsboard_gateway.tb_gateway:daemon'
        ]},
    # Ship the bundled config files alongside the code.
    package_data={
        "*": ["config/*"]
    })
| 40.849315 | 134 | 0.687793 |
b945143cc11070a2f151b1b14edfe12809d052cf | 9,759 | py | Python | Tools/ss14_ru/keyfinder.py | Runker1/space-station-14 | b4b6929249c0ec5c6593899fbc85557fb20bbc19 | [
"MIT"
] | null | null | null | Tools/ss14_ru/keyfinder.py | Runker1/space-station-14 | b4b6929249c0ec5c6593899fbc85557fb20bbc19 | [
"MIT"
] | 2 | 2022-01-05T04:21:16.000Z | 2022-01-06T04:51:47.000Z | Tools/ss14_ru/keyfinder.py | Runker1/space-station-14 | b4b6929249c0ec5c6593899fbc85557fb20bbc19 | [
"MIT"
] | null | null | null | import typing
import logging
from pydash import py_
from file import FluentFile
from fluentast import FluentAstAbstract
from fluentformatter import FluentFormatter
from project import Project
from fluent.syntax import ast, FluentParser, FluentSerializer
# Осуществляет актуализацию ключей. Находит файлы английского перевода, проверяет: есть ли русскоязычная пара
# Если нет - создаёт файл с копией переводов из англоязычного
# Далее, пофайлово проверяются ключи. Если в английском файле больше ключей - создает недостающие в русском, с английской копией перевода
# Отмечает русские файлы, в которых есть те ключи, что нет в аналогичных английских
# Отмечает русские файлы, у которых нет англоязычной пары
######################################### Class definitions ###########################################################
class RelativeFile:
    """A :class:`FluentFile` tagged with its locale and its path relative to that locale's root directory."""

    def __init__(self, file: FluentFile, locale: typing.AnyStr, relative_path_from_locale: typing.AnyStr):
        # Plain value object: the three fields are stored as-is.
        self.locale = locale
        self.relative_path_from_locale = relative_path_from_locale
        self.file = file
class FilesFinder:
    """Pairs en-US fluent files with their ru-RU counterparts.

    For every English file without a Russian analog a copy is created (so
    translators start from the English text); Russian files without an
    English analog are reported with a warning.
    """

    def __init__(self, project: Project):
        self.project: Project = project
        # Files created by the most recent execute() run.
        self.created_files: typing.List[FluentFile] = []

    def get_relative_path_dict(self, file: FluentFile, locale):
        """Wrap *file* into a RelativeFile keyed by its path relative to *locale*'s root."""
        if locale == 'ru-RU':
            return RelativeFile(file=file, locale=locale,
                                relative_path_from_locale=file.get_relative_path(self.project.ru_locale_dir_path))
        elif locale == 'en-US':
            return RelativeFile(file=file, locale=locale,
                                relative_path_from_locale=file.get_relative_path(self.project.en_locale_dir_path))
        else:
            raise Exception(f'Локаль {locale} не поддерживается')

    def get_file_pair(self, en_file: FluentFile) -> typing.Tuple[FluentFile, FluentFile]:
        """Return (en_file, ru_file) where ru_file is the ru-RU path analog.

        The ru-RU file object is constructed from the path only; it may not
        exist on disk.
        """
        ru_file_path = en_file.full_path.replace('en-US', 'ru-RU')
        ru_file = FluentFile(ru_file_path)
        return en_file, ru_file

    def execute(self):
        """Create missing ru-RU analogs and warn about orphan ru-RU files.

        Returns the list of newly created FluentFile objects.
        """
        self.created_files = []
        groups = self.get_files_pars()
        keys_without_pair = [key for key in groups if len(groups[key]) < 2]
        for key_without_pair in keys_without_pair:
            relative_file: RelativeFile = groups.get(key_without_pair)[0]
            if relative_file.locale == 'en-US':
                ru_file = self.create_ru_analog(relative_file)
                self.created_files.append(ru_file)
            elif relative_file.locale == 'ru-RU':
                self.warn_en_analog_not_exist(relative_file)
            else:
                raise Exception(f'Файл {relative_file.file.full_path} имеет неизвестную локаль "{relative_file.locale}"')
        return self.created_files

    def get_files_pars(self):
        """Group all fluent files of both locales by their locale-relative path.

        Returns a dict mapping relative path -> list of RelativeFile (one or
        two entries, depending on whether both locales have the file).
        """
        # Fix: consistently use self.project instead of the module-level
        # ``project`` global, so the instance works with whatever Project it
        # was constructed with.
        en_fluent_files = self.project.get_fluent_files_by_dir(self.project.en_locale_dir_path)
        ru_fluent_files = self.project.get_fluent_files_by_dir(self.project.ru_locale_dir_path)
        en_fluent_relative_files = [self.get_relative_path_dict(f, 'en-US') for f in en_fluent_files]
        ru_fluent_relative_files = [self.get_relative_path_dict(f, 'ru-RU') for f in ru_fluent_files]
        relative_files = py_.flatten_depth(py_.concat(en_fluent_relative_files, ru_fluent_relative_files), depth=1)
        return py_.group_by(relative_files, 'relative_path_from_locale')

    def create_ru_analog(self, en_relative_file: RelativeFile) -> FluentFile:
        """Create a ru-RU file containing a verbatim copy of the English data."""
        en_file: FluentFile = en_relative_file.file
        en_file_data = en_file.read_data()
        ru_file_path = en_file.full_path.replace('en-US', 'ru-RU')
        ru_file = FluentFile(ru_file_path)
        ru_file.save_data(en_file_data)
        logging.info(f'Создан файл {ru_file_path} с переводами из английского файла')
        return ru_file

    def warn_en_analog_not_exist(self, ru_relative_file: RelativeFile):
        """Log a warning for a ru-RU file whose en-US analog is missing."""
        file: FluentFile = ru_relative_file.file
        en_file_path = file.full_path.replace('ru-RU', 'en-US')
        logging.warning(f'Файл {file.full_path} не имеет английского аналога по пути {en_file_path}')
class KeyFinder:
    """Synchronizes message keys between paired en-US and ru-RU fluent files.

    English keys (and attributes) missing from the Russian file are copied
    over verbatim; Russian keys without an English analog are only logged.
    """
    def __init__(self, files_dict):
        # files_dict: mapping of relative path -> list of RelativeFile,
        # as produced by FilesFinder.get_files_pars().
        self.files_dict = files_dict
        # Files modified by the most recent execute() run.
        self.changed_files: typing.List[FluentFile] = []
    def execute(self) -> typing.List[FluentFile]:
        """Compare every complete en/ru file pair; return the changed ru files."""
        self.changed_files = []
        for pair in self.files_dict:
            ru_relative_file = py_.find(self.files_dict[pair], {'locale': 'ru-RU'})
            en_relative_file = py_.find(self.files_dict[pair], {'locale': 'en-US'})
            # Pairs missing one side are handled elsewhere (FilesFinder).
            if not en_relative_file or not ru_relative_file:
                continue
            ru_file: FluentFile = ru_relative_file.file
            en_file: FluentFile = en_relative_file.file
            self.compare_files(en_file, ru_file)
        return self.changed_files
    def compare_files(self, en_file, ru_file):
        """Parse both files and reconcile keys in both directions."""
        ru_file_parsed: ast.Resource = ru_file.parse_data(ru_file.read_data())
        en_file_parsed: ast.Resource = en_file.parse_data(en_file.read_data())
        self.write_to_ru_files(ru_file, ru_file_parsed, en_file_parsed)
        self.log_not_exist_en_files(en_file, ru_file_parsed, en_file_parsed)
    def write_to_ru_files(self, ru_file, ru_file_parsed, en_file_parsed):
        """Copy en-US-only messages/attributes into the ru-RU AST and save.

        NOTE: the file is serialized and saved once per changed message,
        not once per file — presumably for per-key logging; confirm before
        changing.
        """
        for idx, en_message in enumerate(en_file_parsed.body):
            # Comment nodes carry no keys and are skipped.
            if isinstance(en_message, ast.ResourceComment) or isinstance(en_message, ast.GroupComment) or isinstance(en_message, ast.Comment):
                continue
            ru_message_analog_idx = py_.find_index(ru_file_parsed.body, lambda ru_message: self.find_duplicate_message_id_name(ru_message, en_message))
            have_changes = False
            # Attributes
            if getattr(en_message, 'attributes', None) and ru_message_analog_idx != -1:
                if not ru_file_parsed.body[ru_message_analog_idx].attributes:
                    # Russian message has no attributes at all: take the
                    # English attribute list wholesale.
                    ru_file_parsed.body[ru_message_analog_idx].attributes = en_message.attributes
                    have_changes = True
                else:
                    # Otherwise copy only the attributes missing by id.
                    for en_attr in en_message.attributes:
                        ru_attr_analog = py_.find(ru_file_parsed.body[ru_message_analog_idx].attributes, lambda ru_attr: ru_attr.id.name == en_attr.id.name)
                        if not ru_attr_analog:
                            ru_file_parsed.body[ru_message_analog_idx].attributes.append(en_attr)
                            have_changes = True
            # New elements
            if ru_message_analog_idx == -1:
                ru_file_body = ru_file_parsed.body
                # Insert at the same position as in the English file when
                # possible, otherwise append at the end.
                if (len(ru_file_body) >= idx + 1):
                    ru_file_parsed = self.append_message(ru_file_parsed, en_message, idx)
                else:
                    ru_file_parsed = self.push_message(ru_file_parsed, en_message)
                have_changes = True
            if have_changes:
                # ``serializer`` is the module-level FluentSerializer.
                serialized = serializer.serialize(ru_file_parsed)
                self.save_and_log_file(ru_file, serialized, en_message)
    def log_not_exist_en_files(self, en_file, ru_file_parsed, en_file_parsed):
        """Warn about ru-RU keys that have no en-US analog (no modification)."""
        for idx, ru_message in enumerate(ru_file_parsed.body):
            if isinstance(ru_message, ast.ResourceComment) or isinstance(ru_message, ast.GroupComment) or isinstance(ru_message, ast.Comment):
                continue
            en_message_analog = py_.find(en_file_parsed.body, lambda en_message: self.find_duplicate_message_id_name(ru_message, en_message))
            if not en_message_analog:
                logging.warning(f'Ключ "{FluentAstAbstract.get_id_name(ru_message)}" не имеет английского аналога по пути {en_file.full_path}"')
    def append_message(self, ru_file_parsed, en_message, en_message_idx):
        """Insert en_message into the ru AST body at position en_message_idx."""
        ru_message_part_1 = ru_file_parsed.body[0:en_message_idx]
        ru_message_part_middle = [en_message]
        ru_message_part_2 = ru_file_parsed.body[en_message_idx:]
        new_body = py_.flatten_depth([ru_message_part_1, ru_message_part_middle, ru_message_part_2], depth=1)
        ru_file_parsed.body = new_body
        return ru_file_parsed
    def push_message(self, ru_file_parsed, en_message):
        """Append en_message at the end of the ru AST body."""
        ru_file_parsed.body.append(en_message)
        return ru_file_parsed
    def save_and_log_file(self, file, file_data, message):
        """Write file_data to disk, log the added key, record the changed file."""
        file.save_data(file_data)
        logging.info(f'В файл {file.full_path} добавлен ключ "{FluentAstAbstract.get_id_name(message)}"')
        self.changed_files.append(file)
    def find_duplicate_message_id_name(self, ru_message, en_message):
        """Predicate: truthy (the ru message) when both messages share an id name.

        Returns False/None otherwise; callers only rely on truthiness.
        """
        ru_element_id_name = FluentAstAbstract.get_id_name(ru_message)
        en_element_id_name = FluentAstAbstract.get_id_name(en_message)
        if not ru_element_id_name or not en_element_id_name:
            return False
        if ru_element_id_name == en_element_id_name:
            return ru_message
        else:
            return None
######################################## Var definitions ###############################################################
logging.basicConfig(level = logging.INFO)
project = Project()
parser = FluentParser()  # NOTE(review): not referenced in this module — confirm whether it can be dropped
serializer = FluentSerializer(with_junk=True)  # read as a global by KeyFinder.write_to_ru_files
files_finder = FilesFinder(project)
key_finder = KeyFinder(files_finder.get_files_pars())
########################################################################################################################
# Step 1: make sure every en-US file has a ru-RU counterpart on disk.
print('Проверка актуальности файлов ...')
created_files = files_finder.execute()
if len(created_files):
    print('Форматирование созданных файлов ...')
    FluentFormatter.format(created_files)
# Step 2: copy en-US keys that are missing from the ru-RU files.
print('Проверка актуальности ключей ...')
changed_files = key_finder.execute()
if len(changed_files):
    print('Форматирование изменённых файлов ...')
    FluentFormatter.format(changed_files)
| 44.766055 | 156 | 0.672507 |
f9f997e70b71e6452c028138eaec0ae79ac54387 | 1,015 | py | Python | tools/SDKTool/src/context/app_context.py | Passer-D/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
] | 1,210 | 2020-08-18T07:57:36.000Z | 2022-03-31T15:06:05.000Z | tools/SDKTool/src/context/app_context.py | guokaiSama/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
] | 37 | 2020-08-24T02:48:38.000Z | 2022-01-30T06:41:52.000Z | tools/SDKTool/src/context/app_context.py | guokaiSama/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
] | 275 | 2020-08-18T08:35:16.000Z | 2022-03-31T15:06:07.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
from ..common.singleton import Singleton
class AppContext(metaclass=Singleton):
    """Process-wide key/value store shared through the Singleton metaclass."""

    # Class-level flag; the first __init__ run shadows it with an instance
    # attribute so repeated constructions do not wipe the stored data.
    __initialized = False

    def __init__(self):
        super().__init__()
        if self.__initialized:
            return
        self.__store = {}
        self.__initialized = True

    def set_info(self, info_key: str, info_value) -> bool:
        """Store info_value under info_key; reject non-string keys."""
        if not isinstance(info_key, str):
            return False
        self.__store[info_key] = info_value
        return True

    def get_info(self, info_key: str, default_value=None):
        """Return the value stored under info_key, or default_value if absent."""
        return self.__store.get(info_key, default_value)
# Convenience module-level instance of the singleton context.
g_app_context = AppContext()
| 29.852941 | 111 | 0.694581 |
938122ef69dd225599f46b63ed42e7fc0ca6d1b8 | 47,924 | py | Python | aiocoap/protocol.py | HRogge/aiocoap | 9b249a22cb3b7316dfa7ebb32bafd2dc873df61a | [
"MIT"
] | null | null | null | aiocoap/protocol.py | HRogge/aiocoap | 9b249a22cb3b7316dfa7ebb32bafd2dc873df61a | [
"MIT"
] | null | null | null | aiocoap/protocol.py | HRogge/aiocoap | 9b249a22cb3b7316dfa7ebb32bafd2dc873df61a | [
"MIT"
] | null | null | null | # This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <c.amsuess@energyharvesting.at>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
"""This module contains the classes that are responsible for keeping track of
messages:
* :class:`Context` roughly represents the CoAP endpoint (basically a UDP
socket) -- something that can send requests and possibly can answer
incoming requests.
* a :class:`Request` gets generated whenever a request gets sent to keep
track of the response
* a :class:`Responder` keeps track of a single incoming request
"""
import asyncio
import functools
import weakref
import time
from . import defaults
from .credentials import CredentialsMap
from .message import Message
from .optiontypes import BlockOption
from .messagemanager import MessageManager
from .tokenmanager import TokenManager, PlumbingRequest
from . import interfaces
from . import error
from .numbers import (INTERNAL_SERVER_ERROR, NOT_FOUND,
SERVICE_UNAVAILABLE, CONTINUE, REQUEST_ENTITY_INCOMPLETE,
OBSERVATION_RESET_TIME, MAX_TRANSMIT_WAIT)
from .numbers.optionnumbers import OptionNumber
import warnings
import logging
# log levels used:
# * debug is for things that occur even under perfect conditions.
# * info is for things that are well expected, but might be interesting during
# testing a network of nodes and not debugging the library. (timeouts,
# retransmissions, pings)
# * warning is for everything that indicates a malbehaved client. (these don't
# necessarily indicate a client bug, though; things like requesting a
# nonexistent block can just as well happen when a resource's content has
# changed between blocks).
def _extract_block_key(message):
    """Build a hashable key identifying the blockwise operation a request
    message belongs to: its remote plus a cache key over all options except
    the Block1/Block2/Observe control options.

    See discussion at <https://mailarchive.ietf.org/arch/msg/core/I-6LzAL6lIUVDA6_g9YM3Zjhg8E>.
    """
    ignored_options = [
        OptionNumber.BLOCK1,
        OptionNumber.BLOCK2,
        OptionNumber.OBSERVE,
    ]
    return (message.remote, message.get_cache_key(ignored_options))
class Context(interfaces.RequestProvider):
    """Applications' entry point to the network

    A :class:`.Context` coordinates one or more network :mod:`.transports`
    implementations and dispatches data between them and the application.

    The application can start requests using the message dispatch methods, and
    set a :class:`resources.Site` that will answer requests directed to the
    application as a server.

    On the library-internals side, it is the prime implementation of the
    :class:`interfaces.RequestProvider` interface, creates :class:`Request` and
    :class:`Response` classes on demand, and decides which transport
    implementations to start and which are to handle which messages.

    **Context creation and destruction**

    The following functions are provided for creating and stopping a context:

    .. note::

        A typical application should only ever create one context, even (or
        especially when) it acts both as a server and as a client (in which
        case a server context should be created).

        A context that is not used any more must be shut down using
        :meth:`.shutdown()`, but typical applications will not need to because
        they use the context for the full process lifetime.

    .. automethod:: create_client_context
    .. automethod:: create_server_context

    .. automethod:: shutdown

    **Dispatching messages**

    CoAP requests can be sent using the following functions:

    .. automethod:: request

    .. automethod:: multicast_request

    If more control is needed, you can create a :class:`Request` yourself and
    pass the context to it.

    **Other methods and properties**

    The remaining methods and properties are to be considered unstable even
    when the project reaches a stable version number; please file a feature
    request for stabilization if you want to reliably access any of them.

    (Sorry for the duplicates, still looking for a way to make autodoc list
    everything not already mentioned).
    """
    def __init__(self, loop=None, serversite=None, loggername="coap", client_credentials=None):
        self.log = logging.getLogger(loggername)
        self.loop = loop or asyncio.get_event_loop()
        self.serversite = serversite
        # Token managers (or OSCORE transports) this context dispatches to.
        self.request_interfaces = []
        # Tasks currently executing _render_to_plumbing_request; kept so
        # shutdown() can cancel them.
        self._running_renderings = set()
        self.client_credentials = client_credentials or CredentialsMap()
        # FIXME: consider introducing a TimeoutDict
        self._block1_assemblies = {} # mapping block-key to (partial request, timeout handle)
        self._block2_assemblies = {} # mapping block-key to (complete response, timeout handle)
        # (for both, block-key is as extracted by _extract_block_key)
    #
    # convenience methods for class instanciation
    #
    async def _append_tokenmanaged_messagemanaged_transport(self, message_interface_constructor):
        # Build the full TokenManager -> MessageManager -> message transport
        # stack (for datagram-style transports) and register it.
        tman = TokenManager(self)
        mman = MessageManager(tman)
        transport = await message_interface_constructor(mman)
        mman.message_interface = transport
        tman.token_interface = mman
        self.request_interfaces.append(tman)
    async def _append_tokenmanaged_transport(self, token_interface_constructor):
        # Build a TokenManager directly on a token-level transport (for
        # stream-style transports like TCP/TLS) and register it.
        tman = TokenManager(self)
        transport = await token_interface_constructor(tman)
        tman.token_interface = transport
        self.request_interfaces.append(tman)
    @classmethod
    async def create_client_context(cls, *, loggername="coap", loop=None):
        """Create a context bound to all addresses on a random listening port.

        This is the easiest way to get a context suitable for sending client
        requests.
        """
        if loop is None:
            loop = asyncio.get_event_loop()
        self = cls(loop=loop, serversite=None, loggername=loggername)
        # FIXME make defaults overridable (postponed until they become configurable too)
        for transportname in defaults.get_default_clienttransports(loop=loop):
            if transportname == 'udp6':
                from .transports.udp6 import MessageInterfaceUDP6
                await self._append_tokenmanaged_messagemanaged_transport(
                        lambda mman: MessageInterfaceUDP6.create_client_transport_endpoint(mman, log=self.log, loop=loop))
            elif transportname == 'simple6':
                from .transports.simple6 import MessageInterfaceSimple6
                await self._append_tokenmanaged_messagemanaged_transport(
                        lambda mman: MessageInterfaceSimple6.create_client_transport_endpoint(mman, log=self.log, loop=loop))
            elif transportname == 'tinydtls':
                from .transports.tinydtls import MessageInterfaceTinyDTLS
                await self._append_tokenmanaged_messagemanaged_transport(
                        lambda mman: MessageInterfaceTinyDTLS.create_client_transport_endpoint(mman, log=self.log, loop=loop))
            elif transportname == 'tcpclient':
                from .transports.tcp import TCPClient
                await self._append_tokenmanaged_transport(
                        lambda tman: TCPClient.create_client_transport(tman, self.log, loop))
            elif transportname == 'tlsclient':
                from .transports.tls import TLSClient
                await self._append_tokenmanaged_transport(
                        lambda tman: TLSClient.create_client_transport(tman, self.log, loop))
            elif transportname == 'oscore':
                from .transports.oscore import TransportOSCORE
                oscoretransport = TransportOSCORE(self, self)
                self.request_interfaces.append(oscoretransport)
            else:
                # NOTE(review): message typo — "not know" should read "not known"
                raise RuntimeError("Transport %r not know for client context creation"%transportname)
        return self
    @classmethod
    async def create_server_context(cls, site, bind=None, *, loggername="coap-server", loop=None, _ssl_context=None):
        """Create a context, bound to all addresses on the CoAP port (unless
        otherwise specified in the ``bind`` argument).

        This is the easiest way to get a context suitable both for sending
        client and accepting server requests."""
        if loop is None:
            loop = asyncio.get_event_loop()
        self = cls(loop=loop, serversite=site, loggername=loggername)
        for transportname in defaults.get_default_servertransports(loop=loop):
            if transportname == 'udp6':
                from .transports.udp6 import MessageInterfaceUDP6
                await self._append_tokenmanaged_messagemanaged_transport(
                        lambda mman: MessageInterfaceUDP6.create_server_transport_endpoint(mman, log=self.log, loop=loop, bind=bind))
            # FIXME this is duplicated from the client version, as those are client-only anyway
            elif transportname == 'simple6':
                from .transports.simple6 import MessageInterfaceSimple6
                await self._append_tokenmanaged_messagemanaged_transport(
                        lambda mman: MessageInterfaceSimple6.create_client_transport_endpoint(mman, log=self.log, loop=loop))
            elif transportname == 'tinydtls':
                from .transports.tinydtls import MessageInterfaceTinyDTLS
                await self._append_tokenmanaged_messagemanaged_transport(
                        lambda mman: MessageInterfaceTinyDTLS.create_client_transport_endpoint(mman, log=self.log, loop=loop))
            # FIXME end duplication
            elif transportname == 'simplesocketserver':
                from .transports.simplesocketserver import MessageInterfaceSimpleServer
                await self._append_tokenmanaged_messagemanaged_transport(
                        lambda mman: MessageInterfaceSimpleServer.create_server(bind, mman, log=self.log, loop=loop))
            elif transportname == 'tcpserver':
                from .transports.tcp import TCPServer
                await self._append_tokenmanaged_transport(
                        lambda tman: TCPServer.create_server(bind, tman, self.log, loop))
            elif transportname == 'tcpclient':
                from .transports.tcp import TCPClient
                await self._append_tokenmanaged_transport(
                        lambda tman: TCPClient.create_client_transport(tman, self.log, loop))
            elif transportname == 'tlsserver':
                # TLS server is only started when an SSL context was supplied.
                if _ssl_context is not None:
                    from .transports.tls import TLSServer
                    await self._append_tokenmanaged_transport(
                            lambda tman: TLSServer.create_server(bind, tman, self.log, loop, _ssl_context))
            elif transportname == 'tlsclient':
                from .transports.tls import TLSClient
                await self._append_tokenmanaged_transport(
                        lambda tman: TLSClient.create_client_transport(tman, self.log, loop))
            elif transportname == 'oscore':
                from .transports.oscore import TransportOSCORE
                oscoretransport = TransportOSCORE(self, self)
                self.request_interfaces.append(oscoretransport)
            else:
                raise RuntimeError("Transport %r not know for server context creation"%transportname)
        return self
    async def shutdown(self):
        """Take down any listening sockets and stop all related timers.

        After this coroutine terminates, and once all external references to
        the object are dropped, it should be garbage-collectable.

        This method may take the time to inform communications partners of
        stopped observations (but currently does not)."""
        self.log.debug("Shutting down context")
        # Cancel pending blockwise-reassembly timeouts and running renderings.
        for _, canceler in self._block1_assemblies.values():
            canceler()
        for _, canceler in self._block2_assemblies.values():
            canceler()
        for r in self._running_renderings:
            r.cancel()
        # NOTE(review): the ``loop=`` argument to asyncio.wait was removed in
        # Python 3.10 — confirm targeted Python versions.
        done, pending = await asyncio.wait([ri.shutdown() for ri in self.request_interfaces], timeout=3, loop=self.loop)
        for item in done:
            await item
    # FIXME: determine how official this should be, or which part of it is
    # public -- now that BlockwiseRequest uses it. (And formalize what can
    # change about messages and what can't after the remote has been thusly
    # populated).
    async def find_remote_and_interface(self, message):
        """Return the first request interface that recognizes (and fills in)
        the message's remote; raise if none does."""
        for ri in self.request_interfaces:
            if await ri.fill_or_recognize_remote(message):
                return ri
        raise RuntimeError("No request interface could route message")
    def request(self, request_message, handle_blockwise=True):
        """Start a request and return a Request object tracking its response.

        With handle_blockwise (the default), the request is wrapped in a
        BlockwiseRequest; otherwise the message is dispatched as-is.
        """
        if handle_blockwise:
            return BlockwiseRequest(self, request_message)
        plumbing_request = PlumbingRequest(request_message)
        result = Request(plumbing_request, self.loop, self.log)
        async def send():
            try:
                request_interface = await self.find_remote_and_interface(request_message)
                request_interface.request(plumbing_request)
            except Exception as e:
                # Routing failures surface as an exception on the request.
                plumbing_request.add_exception(e)
                return
        self.loop.create_task(send())
        return result
    # the following are under consideration for moving into Site or something
    # mixed into it
    def render_to_plumbing_request(self, plumbing_request):
        """Satisfy a plumbing request from the full :meth:`render` /
        :meth:`needs_blockwise_assembly` / :meth:`add_observation` interfaces
        provided by the site."""
        task = self.loop.create_task(
                self._render_to_plumbing_request(plumbing_request))
        self._running_renderings.add(task)
        remove_task = functools.partial(self._running_renderings.remove, task)
        task.add_done_callback(lambda result, cb=remove_task: cb())
    async def _render_to_plumbing_request(self, plumbing_request):
        """Run the inner renderer, translating exceptions into responses."""
        # will receive a result in the finally, so the observation's
        # cancellation callback can just be hooked into that rather than
        # catching CancellationError here
        cancellation_future = asyncio.Future()
        def cleanup(cancellation_future=cancellation_future):
            if not cancellation_future.done():
                cancellation_future.set_result(None)
        # not trying to cancel the whole rendering right now, as that would
        # mean that we'll need to cancel the task in a way that won't cause a
        # message sent back -- but reacting to an end of interest is very
        # relevant when network errors arrive from observers.
        plumbing_request.on_interest_end(cleanup)
        try:
            await self._render_to_plumbing_request_inner(plumbing_request,
                    cancellation_future)
        except error.RenderableError as e:
            # the repr() here is quite important for garbage collection
            self.log.info("Render request raised a renderable error (%s), responding accordingly.", repr(e))
            plumbing_request.add_response(e.to_message(), is_last=True)
        except asyncio.CancelledError:
            self.log.info("Rendering was interrupted, informing client")
            plumbing_request.add_response(Message(code=SERVICE_UNAVAILABLE), is_last=True)
            raise
        except Exception as e:
            plumbing_request.add_response(Message(code=INTERNAL_SERVER_ERROR), is_last=True)
            self.log.error("An exception occurred while rendering a resource: %r", e, exc_info=e)
        finally:
            cleanup()
    async def _render_to_plumbing_request_inner(self, plumbing_request, cancellation_future):
        """Serve one incoming request: blockwise reassembly/slicing, site
        rendering, and the observation loop."""
        request = plumbing_request.request
        if self.serversite is None:
            plumbing_request.add_response(Message(code=NOT_FOUND, payload=b"not a server"), is_last=True)
            return
        needs_blockwise = await self.serversite.needs_blockwise_assembly(request)
        if needs_blockwise:
            block_key = _extract_block_key(request)
        # Non-initial Block2 requests are served from the stored complete
        # response of an ongoing blockwise transfer.
        if needs_blockwise and request.opt.block2 and \
                request.opt.block2.block_number != 0:
            if request.opt.block1 is not None:
                raise error.BadOption("Block1 conflicts with non-initial Block2")
            try:
                response, _ = self._block2_assemblies[block_key]
            except KeyError:
                plumbing_request.add_response(Message(
                    code=REQUEST_ENTITY_INCOMPLETE),
                    is_last=True)
                self.log.info("Received unmatched blockwise response"
                        " operation message")
                return
            # FIXME: update the timeout? maybe remove item when last is
            # requested in a confirmable message?
            response = response._extract_block(
                    request.opt.block2.block_number,
                    request.opt.block2.size_exponent,
                    request.remote.maximum_payload_size
                    )
            plumbing_request.add_response(
                    response,
                    is_last=True)
            return
        # Block1: accumulate incoming request blocks until the final one.
        if needs_blockwise and request.opt.block1:
            if request.opt.block1.block_number == 0:
                if block_key in self._block1_assemblies:
                    _, canceler = self._block1_assemblies.pop(block_key)
                    canceler()
                    self.log.info("Aborting incomplete Block1 operation at"
                            " arrival of new start block")
                new_aggregate = request
            else:
                try:
                    previous, canceler = self._block1_assemblies.pop(block_key)
                except KeyError:
                    plumbing_request.add_response(Message(
                        code=REQUEST_ENTITY_INCOMPLETE),
                        is_last=True)
                    self.log.info("Received unmatched blockwise request"
                            " operation message")
                    return
                canceler()
                try:
                    previous._append_request_block(request)
                except ValueError:
                    plumbing_request.add_response(Message(
                        code=REQUEST_ENTITY_INCOMPLETE),
                        is_last=True)
                    self.log.info("Failed to assemble blockwise request (gaps or overlaps)")
                    return
                new_aggregate = previous
            if request.opt.block1.more:
                # Store the partial request with an expiry timer and ask the
                # client to continue with the next block.
                canceler = self.loop.call_later(
                        MAX_TRANSMIT_WAIT, # FIXME: introduce an actual parameter here
                        functools.partial(self._block1_assemblies.pop, block_key)
                        ).cancel
                self._block1_assemblies[block_key] = (new_aggregate, canceler)
                plumbing_request.add_response(Message(
                    code=CONTINUE,
                    block1=BlockOption.BlockwiseTuple(
                        request.opt.block1.block_number,
                        True,
                        request.opt.block1.size_exponent),
                    ),
                    is_last=True)
                return
            else:
                immediate_response_block1 = request.opt.block1
                request = new_aggregate
        else:
            immediate_response_block1 = None
        observe_requested = request.opt.observe == 0
        if observe_requested:
            servobs = ServerObservation()
            await self.serversite.add_observation(request, servobs)
            if servobs._accepted:
                cancellation_future.add_done_callback(
                        lambda f, cb=servobs._cancellation_callback: cb())
        response = await self.serversite.render(request)
        if response.code is None or not response.code.is_response():
            self.log.warning("Response does not carry response code (%r),"
                             " application probably violates protocol.",
                             response.code)
        # Overlong responses are stored for Block2 follow-ups and only the
        # first block is returned now.
        if needs_blockwise and (
                len(response.payload) > (
                    request.remote.maximum_payload_size
                    if request.opt.block2 is None
                    else request.opt.block2.size)):
            if block_key in self._block2_assemblies:
                _, canceler = self._block2_assemblies.pop(block_key)
                canceler()
            canceler = self.loop.call_later(
                    MAX_TRANSMIT_WAIT, # FIXME: introduce an actual parameter here
                    functools.partial(self._block2_assemblies.pop, block_key)
                    ).cancel
            self._block2_assemblies[block_key] = (response, canceler)
            szx = request.opt.block2.size_exponent if request.opt.block2 is not None \
                    else request.remote.maximum_block_size_exp
            # if a requested block2 number were not 0, the code would have
            # diverted earlier to serve from active operations
            response = response._extract_block(0, szx, request.remote.maximum_payload_size)
        if needs_blockwise:
            response.opt.block1 = immediate_response_block1
        can_continue = observe_requested and servobs._accepted and \
                response.code.is_successful()
        if observe_requested:
            # see comment on _early_deregister in ServerObservation
            if servobs._early_deregister:
                can_continue = False
            servobs._early_deregister = None
        if can_continue:
            # FIXME: observation numbers should actually not be per
            # asyncio.task, but per (remote, token). if a client renews an
            # observation (possibly with a new ETag or whatever is deemed
            # legal), the new observation events should still carry larger
            # numbers. (if they did not, the client might be tempted to discard
            # them).
            response.opt.observe = next_observation_number = 0
        plumbing_request.add_response(response, is_last=not can_continue)
        # Observation loop: each trigger produces one more notification until
        # an unsuccessful render or a late deregistration ends it.
        while can_continue:
            await servobs._trigger
            # if you wonder why the lines around this are not just `response =
            # await servobs._trigger`, have a look at the 'double' tests in
            # test_observe.py: A later triggering could have replaced
            # servobs._trigger in the meantime.
            response = servobs._trigger.result()
            servobs._trigger = asyncio.Future()
            if response is None:
                response = await self.serversite.render(request)
            if response.code is None or not response.code.is_response():
                self.log.warning("Response does not carry response code (%r),"
                                 " application probably violates protocol.",
                                 response.code)
            can_continue = response.code.is_successful() and \
                    not servobs._late_deregister
            if can_continue:
                ## @TODO handle situations in which this gets called more often than
                # 2^32 times in 256 seconds (or document why we can be sure that
                # that will not happen)
                next_observation_number = next_observation_number + 1
                response.opt.observe = next_observation_number
            plumbing_request.add_response(response, is_last=not can_continue)
class BaseRequest:
    """Mechanisms shared by :class:`Request` and :class:`MulticastRequest`."""
class BaseUnicastRequest(BaseRequest):
    """Utility mixin for anything with a :attr:`response` future.

    Adds the :attr:`response_raising` and :attr:`response_nonraising`
    alternatives to awaiting :attr:`response` directly: error states are
    presented either as exceptions or as an unsuccessful response message.
    """

    @property
    async def response_raising(self):
        """Await the response; raise on failure.

        Unsuccessful responses are raised as
        :class:`.error.ResponseWrappingError`; network errors propagate as-is.

        Experimental Interface."""
        response = await self.response
        if response.code.is_successful():
            return response
        raise error.ResponseWrappingError(response)

    @property
    async def response_nonraising(self):
        """Await the response; never raise.

        Any exception is converted into a 500ish fabricated message, the way
        a proxy would report it.

        Experimental Interface."""
        try:
            return await self.response
        except error.RenderableError as e:
            return e.to_message()
        except Exception:
            return Message(code=INTERNAL_SERVER_ERROR)
class Request(interfaces.Request, BaseUnicastRequest):
    """Drives a single (possibly observed) request by consuming events from a
    PlumbingRequest and feeding them into the :attr:`response` future and, if
    the request carried an Observe option, the :attr:`observation`."""

    # FIXME: Implement timing out with REQUEST_TIMEOUT here

    def __init__(self, plumbing_request, loop, log):
        self._plumbing_request = plumbing_request

        self.response = asyncio.Future()

        # Observe == 0 is the registration value; only then is an observation
        # object created for the caller.
        if plumbing_request.request.opt.observe == 0:
            self.observation = ClientObservation()
        else:
            self.observation = None

        self._runner = loop.create_task(self._run())
        self.log = log

        self.response.add_done_callback(self._response_cancellation_handler)

    def _response_cancellation_handler(self, response):
        # If the caller cancels the response future, tear down the runner
        # task and tell the plumbing layer we lost interest.
        if self.response.cancelled() and not self._runner.cancelled():
            self._runner.cancel()
            self._plumbing_request.stop_interest()

    @staticmethod
    def _add_response_properties(response, request):
        # Attach the originating request so consumers can correlate responses.
        response.request = request

    async def _run(self):
        # FIXME: check that responses come from the same remmote as long as we're assuming unicast

        first_event = await self._plumbing_request._events.get()

        if first_event.message is not None:
            self._add_response_properties(first_event.message, self._plumbing_request.request)
            self.response.set_result(first_event.message)
        else:
            self.response.set_exception(first_event.exception)

        if self.observation is None:
            if not first_event.is_last:
                self.log.error("PlumbingRequest indicated more possible responses"
                               " while the Request handler would not know what to"
                               " do with them, stopping any further request.")
                self._plumbing_request.stop_interest()
            return

        if first_event.is_last:
            self.observation.error(error.NotObservable())
            return

        if first_event.message.opt.observe is None:
            self.log.error("PlumbingRequest indicated more possible responses"
                           " while the Request handler would not know what to"
                           " do with them, stopping any further request.")
            self._plumbing_request.stop_interest()
            return

        # variable names from RFC7641 Section 3.4
        v1 = first_event.message.opt.observe
        t1 = time.time()

        while True:
            # We don't really support cancellation of observations yet (see
            # https://github.com/chrysn/aiocoap/issues/92), but at least
            # stopping the interest is a way to free the local resources after
            # the first observation update, and to make the MID handler RST the
            # observation on the next.
            # FIXME: there *is* now a .on_cancel callback, we should at least
            # hook into that, and possibly even send a proper cancellation
            # then.
            next_event = await self._plumbing_request._events.get()
            if self.observation.cancelled:
                self._plumbing_request.stop_interest()
                return

            if next_event.exception is not None:
                self.observation.error(next_event.exception)
                if not next_event.is_last:
                    self._plumbing_request.stop_interest()
                return

            self._add_response_properties(next_event.message, self._plumbing_request.request)

            if next_event.message.opt.observe is not None:
                # check for reordering
                v2 = next_event.message.opt.observe
                t2 = time.time()

                # Reordering detection per RFC 7641: accept when the sequence
                # number moved forward (mod 2**24) or the reset time elapsed.
                is_recent = (v1 < v2 and v2 - v1 < 2**23) or \
                            (v1 > v2 and v1 - v2 > 2**23) or \
                            (t2 > t1 + OBSERVATION_RESET_TIME)
                if is_recent:
                    t1 = t2
                    v1 = v2
            else:
                # the terminal message is always the last
                is_recent = True

            if is_recent:
                self.observation.callback(next_event.message)

            if next_event.is_last:
                self.observation.error(error.ObservationCancelled())
                return

            if next_event.message.opt.observe is None:
                self.observation.error(error.ObservationCancelled())
                self.log.error("PlumbingRequest indicated more possible responses"
                               " while the Request handler would not know what to"
                               " do with them, stopping any further request.")
                self._plumbing_request.stop_interest()
                return
class BlockwiseRequest(BaseUnicastRequest, interfaces.Request):
    """Wraps an application request, performing RFC 7959 block-wise transfer
    (Block1 upload loop, then Block2 reassembly) on top of the non-blockwise
    :meth:`protocol.request` primitive."""

    def __init__(self, protocol, app_request):
        self.protocol = protocol
        self.log = self.protocol.log.getChild("blockwise-requester")

        self.response = asyncio.Future()

        if app_request.opt.observe is not None:
            self.observation = ClientObservation()
        else:
            self.observation = None

        # Only a weak reference to the observation is handed to the runner so
        # the observation can be garbage collected while the task runs.
        self._runner = asyncio.Task(self._run_outer(
            app_request,
            self.response,
            weakref.ref(self.observation) if self.observation is not None else lambda: None,
            self.protocol,
            self.log,
            ))
        self.response.add_done_callback(self._response_cancellation_handler)

    def _response_cancellation_handler(self, response_future):
        # Cancellation of the response future propagates into the runner task.
        if self.response.cancelled() and not self._runner.cancelled():
            self._runner.cancel()

    @classmethod
    async def _run_outer(cls, app_request, response, weak_observation, protocol, log):
        # Wrapper that routes any failure of _run into the response future
        # and/or the observation, so no exception is silently lost.
        try:
            await cls._run(app_request, response, weak_observation, protocol, log)
        except asyncio.CancelledError:
            pass # results already set
        except Exception as e:
            logged = False
            if not response.done():
                logged = True
                response.set_exception(e)
            obs = weak_observation()
            if app_request.opt.observe is not None and obs is not None:
                logged = True
                obs.error(e)
            if not logged:
                # should be unreachable
                log.error("Exception in BlockwiseRequest runner neither went to response nor to observation: %s", e, exc_info=e)

    # This is a class method because that allows self and self.observation to
    # be freed even when this task is running, and the task to stop itself --
    # otherwise we couldn't know when users just "forget" about a request
    # object after using its response (esp. in observe cases) and leave this
    # task running.
    @classmethod
    async def _run(cls, app_request, response, weak_observation, protocol, log):
        # we need to populate the remote right away, because the choice of
        # blocks depends on it.
        await protocol.find_remote_and_interface(app_request)

        size_exp = app_request.remote.maximum_block_size_exp

        if app_request.opt.block1 is not None:
            assert app_request.opt.block1.block_number == 0, "Unexpected block number in app_request"
            assert app_request.opt.block1.more == False, "Unexpected more-flag in app_request"
            # this is where the library user can traditionally pass in size
            # exponent hints into the library.
            size_exp = app_request.opt.block1.size_exponent

        # Offset in the message in blocks of size_exp. Whoever changes size_exp
        # is responsible for updating this number.
        block_cursor = 0

        while True:
            # ... send a chunk

            if len(app_request.payload) > (2 ** (size_exp + 4)):
                current_block1 = app_request._extract_block(
                        block_cursor,
                        size_exp,
                        app_request.remote.maximum_payload_size)
            else:
                current_block1 = app_request

            blockrequest = protocol.request(current_block1, handle_blockwise=False)
            blockresponse = await blockrequest.response

            # store for future blocks to ensure that the next blocks will be
            # sent from the same source address (in the UDP case; for many
            # other transports it won't matter).
            app_request.remote = blockresponse.remote

            if blockresponse.opt.block1 is None:
                if blockresponse.code.is_successful() and current_block1.opt.block1:
                    log.warning("Block1 option completely ignored by server, assuming it knows what it is doing.")
                # FIXME: handle 4.13 and retry with the indicated size option
                break

            block1 = blockresponse.opt.block1
            log.debug("Response with Block1 option received, number = %d, more = %d, size_exp = %d.", block1.block_number, block1.more, block1.size_exponent)

            if block1.block_number != current_block1.opt.block1.block_number:
                raise error.UnexpectedBlock1Option("Block number mismatch")

            if size_exp == 7:
                # BERT-style (size_exp 7) blocks may cover multiple 1024-byte
                # units per exchange.
                block_cursor += len(current_block1.payload) // 1024
            else:
                block_cursor += 1

            # If the server negotiated a smaller block size, rescale the
            # cursor so it still counts blocks of the new size.
            while block1.size_exponent < size_exp:
                block_cursor *= 2
                size_exp -= 1

            if not current_block1.opt.block1.more:
                if block1.more or blockresponse.code == CONTINUE:
                    # treating this as a protocol error -- letting it slip
                    # through would misrepresent the whole operation as an
                    # over-all 2.xx (successful) one.
                    raise error.UnexpectedBlock1Option("Server asked for more data at end of body")
                break

            # checks before preparing the next round:

            if blockresponse.opt.observe:
                # we're not *really* interested in that block, we just sent an
                # observe option to indicate that we'll want to observe the
                # resulting representation as a whole
                log.warning("Server answered Observe in early Block1 phase, cancelling the erroneous observation.")
                blockrequest.observe.cancel()

            if block1.more:
                # FIXME i think my own server is dowing this wrong
                #if response.code != CONTINUE:
                #    raise error.UnexpectedBlock1Option("more-flag set but no Continue")
                pass
            else:
                if not blockresponse.code.is_successful():
                    break
                else:
                    # ignoring (discarding) the successul intermediate result, waiting for a final one
                    continue

        lower_observation = None
        if app_request.opt.observe is not None:
            if blockresponse.opt.observe is not None:
                lower_observation = blockrequest.observation
            else:
                obs = weak_observation()
                if obs:
                    obs.error(error.NotObservable())
                del obs

        assert blockresponse is not None, "Block1 loop broke without setting a response"
        blockresponse.opt.block1 = None

        # FIXME check with RFC7959: it just says "send requests similar to the
        # requests in the Block1 phase", what does that mean? using the last
        # block1 as a reference for now, especially because in the
        # only-one-request-block case, that's the original request we must send
        # again and again anyway
        assembled_response = await cls._complete_by_requesting_block2(protocol, current_block1, blockresponse, log)

        response.set_result(assembled_response)
        # finally set the result

        if lower_observation is not None:
            # FIXME this can all be simplified a lot since it's no more
            # expected that observations shut themselves down when GC'd.
            obs = weak_observation()
            del weak_observation
            if obs is None:
                lower_observation.cancel()
                return
            future_weak_observation = asyncio.Future() # packing this up because its destroy callback needs to reference the subtask
            subtask = asyncio.Task(cls._run_observation(app_request, lower_observation, future_weak_observation, protocol, log))
            future_weak_observation.set_result(weakref.ref(obs, lambda obs: subtask.cancel()))
            obs.on_cancel(subtask.cancel)
            del obs
            await subtask

    @classmethod
    async def _run_observation(cls, original_request, lower_observation, future_weak_observation, protocol, log):
        # Re-runs the Block2 completion for every block1-level notification
        # and forwards the completed representation to the user observation.
        weak_observation = await future_weak_observation
        # we can use weak_observation() here at any time, because whenever that
        # becomes None, this task gets cancelled
        try:
            async for block1_notification in lower_observation:
                log.debug("Notification received")
                full_notification = await cls._complete_by_requesting_block2(protocol, original_request, block1_notification, log)
                log.debug("Reporting completed notification")
                weak_observation().callback(full_notification)
            # FIXME verify that this loop actually ends iff the observation
            # was cancelled -- otherwise find out the cause(s) or make it not
            # cancel under indistinguishable circumstances
            weak_observation().error(error.ObservationCancelled())
        except asyncio.CancelledError:
            return
        except Exception as e:
            weak_observation().error(e)

    @classmethod
    async def _complete_by_requesting_block2(cls, protocol, request_to_repeat, initial_response, log):
        # FIXME this can probably be deduplicated against BlockwiseRequest

        # No Block2 at all, or a single (final) block: nothing to assemble.
        if initial_response.opt.block2 is None or initial_response.opt.block2.more is False:
            initial_response.opt.block2 = None
            return initial_response

        if initial_response.opt.block2.block_number != 0:
            log.error("Error assembling blockwise response (expected first block)")
            raise error.UnexpectedBlock2()

        assembled_response = initial_response
        last_response = initial_response
        while True:
            current_block2 = request_to_repeat._generate_next_block2_request(assembled_response)

            # Pin follow-up requests to the remote that answered first.
            current_block2 = current_block2.copy(remote=initial_response.remote)

            blockrequest = protocol.request(current_block2, handle_blockwise=False)
            last_response = await blockrequest.response

            if last_response.opt.block2 is None:
                log.warning("Server sent non-blockwise response after having started a blockwise transfer. Blockwise transfer cancelled, accepting single response.")
                return last_response

            block2 = last_response.opt.block2
            log.debug("Response with Block2 option received, number = %d, more = %d, size_exp = %d.", block2.block_number, block2.more, block2.size_exponent)
            try:
                assembled_response._append_response_block(last_response)
            except error.Error as e:
                log.error("Error assembling blockwise response, passing on error %r"%e)
                raise

            if block2.more is False:
                return assembled_response
class ClientObservation:
    """An interface to observe notification updates arriving on a request.

    This class does not actually provide any of the observe functionality, it
    is purely a container for dispatching the messages via callbacks or
    asynchronous iteration. It gets driven (ie. populated with responses or
    errors including observation termination) by a Request object.
    """
    def __init__(self):
        self.callbacks = []
        self.errbacks = []

        self.cancelled = False
        self._on_cancel = []

        self._latest_response = None
        # the analogous error is stored in _cancellation_reason when cancelled.

    def __aiter__(self):
        """`async for` interface to observations. Currently, this still loses
        information to the application (the reason for the termination is
        unclear).

        Experimental Interface."""
        it = self._Iterator()
        self.register_callback(it.push)
        self.register_errback(it.push_err)
        return it

    class _Iterator:
        # Adapts the callback/errback interface to the async-iterator
        # protocol via a repeatedly replaced future (lossy by design).
        def __init__(self):
            self._future = asyncio.Future()

        def push(self, item):
            if self._future.done():
                # we don't care whether we overwrite anything, this is a lossy queue as observe is lossy
                self._future = asyncio.Future()
            self._future.set_result(item)

        def push_err(self, e):
            if self._future.done():
                self._future = asyncio.Future()
            self._future.set_exception(e)

        async def __anext__(self):
            f = self._future
            try:
                result = await self._future
                # FIXME see `await servobs._trigger` comment: might waiting for
                # the original future not yield the first future's result when
                # a quick second future comes in in a push?
                if f is self._future:
                    self._future = asyncio.Future()
                return result
            except (error.NotObservable, error.ObservationCancelled):
                # only exit cleanly when the server -- right away or later --
                # states that the resource is not observable any more
                # FIXME: check whether an unsuccessful message is still passed
                # as an observation result (or whether it should be)
                raise StopAsyncIteration

        def __del__(self):
            if self._future.done():
                try:
                    # Fetch the result so any errors show up at least in the
                    # finalizer output
                    self._future.result()
                except (error.ObservationCancelled, error.NotObservable):
                    # This is the case at the end of an observation cancelled
                    # by the server.
                    pass
                except error.LibraryShutdown:
                    pass

    def register_callback(self, callback):
        """Call the callback whenever a response to the message comes in, and
        pass the response to it."""
        if self.cancelled:
            return

        self.callbacks.append(callback)
        # Late subscribers immediately receive the latest known response.
        if self._latest_response is not None:
            callback(self._latest_response)

    def register_errback(self, callback):
        """Call the callback whenever something goes wrong with the
        observation, and pass an exception to the callback. After such a
        callback is called, no more callbacks will be issued."""
        if self.cancelled:
            callback(self._cancellation_reason)
            return
        self.errbacks.append(callback)

    def callback(self, response):
        """Notify all listeners of an incoming response"""
        self._latest_response = response
        for c in self.callbacks:
            c(response)

    def error(self, exception):
        """Notify registered listeners that the observation went wrong. This
        can only be called once."""
        for c in self.errbacks:
            c(exception)

        # cancel() resets _cancellation_reason to None, so the reason is
        # stored only after cancelling.
        self.cancel()
        self._cancellation_reason = exception

    def cancel(self):
        # FIXME determine whether this is called by anything other than error,
        # and make it private so there is always a _cancellation_reason
        """Cease to generate observation or error events. This will not
        generate an error by itself."""

        assert self.cancelled == False

        # make sure things go wrong when someone tries to continue this
        self.errbacks = None
        self.callbacks = None

        self.cancelled = True
        while self._on_cancel:
            self._on_cancel.pop()()
        self._cancellation_reason = None

    def on_cancel(self, callback):
        # NOTE(review): a callback registered after cancellation is invoked
        # immediately *and* still appended to _on_cancel -- confirm whether
        # the append was meant to be skipped in that case.
        if self.cancelled:
            callback()
        self._on_cancel.append(callback)

    def __repr__(self):
        return '<%s %s at %#x>'%(type(self).__name__, "(cancelled)" if self.cancelled else "(%s call-, %s errback(s))"%(len(self.callbacks), len(self.errbacks)), id(self))
class ServerObservation:
    """Server-side handle for an observation: resources accept it and later
    push updated representations through :meth:`trigger`."""

    def __init__(self):
        self._accepted = False
        self._trigger = asyncio.Future()
        # A deregistration is "early" if it happens before the response message
        # is actually sent; calling deregister() in that time (typically during
        # `render()`) will not send an unsuccessful response message but just
        # sent this flag which is set to None as soon as it is too late for an
        # early deregistration.
        # This mechanism is temporary until more of aiocoap behaves like
        # PlumbingRequest which does not suffer from this limitation.
        self._early_deregister = False
        self._late_deregister = False

    def accept(self, cancellation_callback):
        # Called by the server machinery once the observation is established.
        self._accepted = True
        self._cancellation_callback = cancellation_callback

    def deregister(self, reason=None):
        # First (early) call merely flags the observation; any later call is
        # the deprecated path that fabricates an unsuccessful final response.
        if self._early_deregister is False:
            self._early_deregister = True
            return

        warnings.warn("Late use of ServerObservation.deregister() is"
                      " deprecated, use .trigger with an unsuccessful value"
                      " instead",
                      DeprecationWarning)
        self.trigger(Message(code=INTERNAL_SERVER_ERROR, payload=b"Resource became unobservable"))

    def trigger(self, response=None, *, is_last=False):
        """Send an updated response; if None is given, the observed resource's
        rendering will be invoked to produce one.

        `is_last` can be set to True to indicate that no more responses will be
        sent. Note that an unsuccessful response will be the last no matter
        what is_last says, as such a message always terminates a CoAP
        observation."""
        if is_last:
            self._late_deregister = True
        if self._trigger.done():
            # we don't care whether we overwrite anything, this is a lossy queue as observe is lossy
            self._trigger = asyncio.Future()
        self._trigger.set_result(response)
| 43.135914 | 171 | 0.635485 |
e519940bd490d6a53ebf258268d117c91bb85f41 | 1,984 | py | Python | python/chapter-5/hello_world_mirrored_queue_consumer_selective_nodes.py | Vekaco/rabbitmq-in-action | 1a4afd73d4ec466b942192a2b289adf871b91600 | [
"BSD-2-Clause"
] | 270 | 2015-01-03T04:22:25.000Z | 2021-12-28T03:08:02.000Z | python/chapter-5/hello_world_mirrored_queue_consumer_selective_nodes.py | Vekaco/rabbitmq-in-action | 1a4afd73d4ec466b942192a2b289adf871b91600 | [
"BSD-2-Clause"
] | 23 | 2020-02-12T02:35:49.000Z | 2022-02-11T03:45:40.000Z | python/chapter-5/hello_world_mirrored_queue_consumer_selective_nodes.py | Vekaco/rabbitmq-in-action | 1a4afd73d4ec466b942192a2b289adf871b91600 | [
"BSD-2-Clause"
] | 174 | 2015-01-22T23:48:15.000Z | 2022-03-06T12:38:52.000Z | ###############################################
# RabbitMQ in Action
# Chapter 5 - Hello World Consumer
# (Mirrored Queues)
#
# Requires: pika >= 0.9.5
#
# Author: Jason J. W. Williams
# (C)2011
###############################################
# NOTE: Python 2 script (uses the `print body` statement syntax); do not run
# under Python 3 without porting.
import pika

credentials = pika.PlainCredentials("guest", "guest")
conn_params = pika.ConnectionParameters("localhost",
                                        credentials = credentials)
conn_broker = pika.BlockingConnection(conn_params) #/(hwcmq.1) Establish connection to broker

channel = conn_broker.channel() #/(hwcmq.2) Obtain channel

channel.exchange_declare(exchange="hello-exchange", #/(hwcmq.3) Declare the exchange
                         type="direct",
                         passive=False,
                         durable=True,
                         auto_delete=False)

# HA policy arguments: mirror the queue on exactly these two cluster nodes.
queue_args = {"x-ha-policy" : "nodes",
              "x-ha-policy-params" : ["rabbit@Phantome",
                                      "rabbit2@Phantome"]} #/(hwcmq.4) Set queue mirroring policy
channel.queue_declare(queue="hello-queue", arguments=queue_args) #/(hwcmq.5) Declare the queue
channel.queue_bind(queue="hello-queue", #/(hwcmq.6) Bind the queue and exchange together on the key "hola"
                   exchange="hello-exchange",
                   routing_key="hola")

def msg_consumer(channel, method, header, body): #/(hwcmq.7) Make function to process incoming messages
    channel.basic_ack(delivery_tag=method.delivery_tag) #/(hwcmq.8) Message acknowledgement
    if body == "quit":
        channel.basic_cancel(consumer_tag="hello-consumer") #/(hwcmq.9) Stop consuming more messages and quit
        channel.stop_consuming()
    else:
        print body
    return

channel.basic_consume( msg_consumer, #/(hwc.9) Subscribe our consumer
                       queue="hello-queue",
                       consumer_tag="hello-consumer")
channel.start_consuming() #/(hwc.10) Start consuming
7d3e5d7b03494f7fe0297d1036c81a16d8d97c3f | 31 | py | Python | market_characterization/tests.py | DavimenUC3M/IronIA-RoboAdvisor | 06d37889d5cb9c40139ceb6a41c959b92fff3291 | [
"MIT"
] | null | null | null | market_characterization/tests.py | DavimenUC3M/IronIA-RoboAdvisor | 06d37889d5cb9c40139ceb6a41c959b92fff3291 | [
"MIT"
] | null | null | null | market_characterization/tests.py | DavimenUC3M/IronIA-RoboAdvisor | 06d37889d5cb9c40139ceb6a41c959b92fff3291 | [
"MIT"
] | 2 | 2022-01-31T21:56:44.000Z | 2022-02-02T10:28:00.000Z | import investpy
import pandas
| 7.75 | 15 | 0.83871 |
dd9c23efb16af95d91acca83db5eb4d3ff9a9426 | 154 | py | Python | hobby/route.py | maoxuelin083/restful-Blog | 29a08613d71ee6c369dcec9ffb09a2a2ed837431 | [
"Apache-2.0"
] | null | null | null | hobby/route.py | maoxuelin083/restful-Blog | 29a08613d71ee6c369dcec9ffb09a2a2ed837431 | [
"Apache-2.0"
] | null | null | null | hobby/route.py | maoxuelin083/restful-Blog | 29a08613d71ee6c369dcec9ffb09a2a2ed837431 | [
"Apache-2.0"
] | null | null | null | from flask_restful import Api
from hobby.views import HobbyResource
hobby_api = Api(prefix='/hobby')
hobby_api.add_resource(HobbyResource, '/')
| 19.25 | 43 | 0.753247 |
13c191328b2b919de7bba25ffb2e0d5d6fee3549 | 7,195 | py | Python | kolibri/plugins/coach/test/test_coach_api.py | prathamopenschool1/kolibri | 395928c206c4c750de9fddcacdd8ca040c3014ff | [
"MIT"
] | null | null | null | kolibri/plugins/coach/test/test_coach_api.py | prathamopenschool1/kolibri | 395928c206c4c750de9fddcacdd8ca040c3014ff | [
"MIT"
] | null | null | null | kolibri/plugins/coach/test/test_coach_api.py | prathamopenschool1/kolibri | 395928c206c4c750de9fddcacdd8ca040c3014ff | [
"MIT"
] | 1 | 2019-11-12T14:00:30.000Z | 2019-11-12T14:00:30.000Z | """
To run this test, type this in command line <kolibri manage test -- kolibri.content>
"""
import datetime
from django.core.urlresolvers import reverse
from django.utils import timezone
from rest_framework.test import APITestCase
from kolibri.auth.constants import collection_kinds
from kolibri.auth.constants import role_kinds
from kolibri.auth.models import Facility
from kolibri.auth.models import FacilityUser
from kolibri.auth.models import Role
from kolibri.auth.test.helpers import provision_device
from kolibri.content import models as content
from kolibri.logger.models import ContentSummaryLog
class ContentReportAPITestCase(APITestCase):
    """
    Testcase for content API methods
    """
    fixtures = ['content_test.json']
    the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"

    def setUp(self):
        provision_device()

    def _reverse_channel_url(self, pattern_name, extra_kwargs=None):
        """Helper method to reverse a URL using the current channel ID.

        ``extra_kwargs`` previously defaulted to a mutable ``{}``; ``None`` is
        used as the sentinel now to avoid the shared-default pitfall.
        """
        kwargs = {"channel_id": self.the_channel_id}
        if extra_kwargs is not None:
            kwargs.update(extra_kwargs)
        return reverse(pattern_name, kwargs=kwargs)

    def _create_facility_and_users(self):
        """Create a facility with one learner and one facility admin.

        This setup was duplicated verbatim in every test method below.
        Both accounts use the password "pass".
        Returns a ``(facility, learner, admin)`` tuple.
        """
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="learner", facility=facility)
        user.set_password("pass")
        user.save()
        admin = FacilityUser.objects.create(username="admin", facility=facility)
        admin.set_password("pass")
        admin.save()
        Role.objects.create(user=admin, collection=facility, kind=role_kinds.ADMIN)
        return facility, user, admin

    def test_contentreport_progress(self):
        # set up data for testing progress_fraction field on content node endpoint
        facility, user, admin = self._create_facility_and_users()
        root = content.ContentNode.objects.get(title="root")
        c2 = content.ContentNode.objects.get(title="c2")
        c2c1 = content.ContentNode.objects.get(title="c2c1")
        c2c3 = content.ContentNode.objects.get(title="c2c3")

        for node, progress in [(c2c1, 0.7), (c2c3, 0.5)]:
            ContentSummaryLog.objects.create(
                user=user,
                content_id=node.content_id,
                progress=progress,
                kind=node.kind,
                channel_id=self.the_channel_id,
                start_timestamp=datetime.datetime.now()
            )

        def assert_progress(node, progress):
            # Fetch the content report for `node` and compare each row's
            # "progress" entry against the expected structure.
            response = self.client.get(self._reverse_channel_url("kolibri:coach:contentreport-list", {
                'content_node_id': node.id,
                'collection_kind': collection_kinds.FACILITY,
                'collection_id': facility.id,
            }))
            for i, prog in enumerate(progress):
                self.assertEqual(response.data[i]["progress"], prog)

        # check that progress is calculated appropriately when user is logged in
        self.client.login(username="admin", password="pass", facility=facility)
        # Topic so None
        assert_progress(root, [
            [{'log_count_total': 0, 'total_progress': 0.0, 'log_count_complete': 0}],
            [
                {'kind': 'audio', 'node_count': 1, 'total_progress': 0.5},
                {'kind': 'document', 'node_count': 1, 'total_progress': 0.0},
                {'kind': 'exercise', 'node_count': 1, 'total_progress': 0.7}
            ]
        ])
        assert_progress(c2, [
            [{'log_count_complete': 0, 'log_count_total': 1, 'total_progress': 0.7}],
            [{'log_count_complete': 0, 'log_count_total': 0, 'total_progress': 0.0}],
            [{'log_count_complete': 0, 'log_count_total': 1, 'total_progress': 0.5}]
        ])

    def test_recentreport_time_filtering(self):
        # set up data for testing time filtering on content node endpoint
        facility, user, admin = self._create_facility_and_users()
        root = content.ContentNode.objects.get(title="root")
        c2c1 = content.ContentNode.objects.get(title="c2c1")
        c2c3 = content.ContentNode.objects.get(title="c2c3")

        start_date = datetime.datetime(2000, 1, 1)
        # One log ends 8 days ago (outside the window), one 6 days ago (inside).
        date_1 = timezone.now() - datetime.timedelta(8)
        date_2 = timezone.now() - datetime.timedelta(6)
        for date, node in ((date_1, c2c1), (date_2, c2c3)):
            ContentSummaryLog.objects.create(
                user=user,
                content_id=node.content_id,
                progress=1.0,
                kind=node.kind,
                channel_id=self.the_channel_id,
                start_timestamp=start_date,
                end_timestamp=date,
            )

        # check that only the log less than 7 days ago returns from recent report
        self.client.login(username="admin", password="pass", facility=facility)
        response = self.client.get(self._reverse_channel_url("kolibri:coach:recentreport-list", {
            'content_node_id': root.id,
            'collection_kind': collection_kinds.FACILITY,
            'collection_id': facility.id,
        }))
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['pk'], c2c3.pk)

    def test_recentreport_unique_filtering(self):
        # set up data for testing that two nodes sharing a content_id only
        # produce a single recent-report row (deduplication by content_id)
        facility, user, admin = self._create_facility_and_users()
        root = content.ContentNode.objects.get(title="root")
        c2c1 = content.ContentNode.objects.get(title="c2c1")
        # Clone c2c1 under a different node id but the same content_id.
        new_id = c2c1.id[:-1] + '1'
        content.ContentNode.objects.create(
            id=new_id,
            content_id=c2c1.content_id,
            kind=c2c1.kind,
            channel_id=c2c1.channel_id,
            parent=root,
            available=True,
            title=c2c1.title,
        )

        start_date = datetime.datetime(2000, 1, 1)
        date = timezone.now() - datetime.timedelta(6)
        ContentSummaryLog.objects.create(
            user=user,
            content_id=c2c1.content_id,
            progress=1.0,
            kind=c2c1.kind,
            channel_id=self.the_channel_id,
            start_timestamp=start_date,
            end_timestamp=date,
        )

        # the duplicate content_id must be reported only once
        self.client.login(username="admin", password="pass", facility=facility)
        response = self.client.get(self._reverse_channel_url("kolibri:coach:recentreport-list", {
            'content_node_id': root.id,
            'collection_kind': collection_kinds.FACILITY,
            'collection_id': facility.id,
        }))
        self.assertEqual(len(response.data), 1)
6a7f14110a6181e39425197cf81cfb1b578d7821 | 2,815 | py | Python | src/bitcaster/dispatchers/handlers/zulip.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 4 | 2018-03-01T10:22:30.000Z | 2020-04-04T16:31:11.000Z | src/bitcaster/dispatchers/handlers/zulip.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 60 | 2018-05-20T04:42:32.000Z | 2022-02-10T17:03:37.000Z | src/bitcaster/dispatchers/handlers/zulip.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 1 | 2018-08-04T05:06:45.000Z | 2018-08-04T05:06:45.000Z | from logging import getLogger
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from zulip import Client as ZulipClient
from bitcaster.exceptions import PluginSendError
from bitcaster.utils.reflect import fqn
from ..base import (CoreDispatcher, DispatcherOptions,
MessageType, SubscriptionOptions,)
from ..registry import dispatcher_registry
logger = getLogger(__name__)
class ZulipMessage(MessageType):
    # Private Zulip messages carry only a body: no separate subject line,
    # and no HTML rendering of the content.
    has_subject = False
    allow_html = False
class ZulipSubscription(SubscriptionOptions):
    # Recipients of private messages are addressed by their Zulip email.
    recipient = serializers.EmailField()
class ZulipOptions(DispatcherOptions):
    # Connection settings for the Zulip dispatcher (validated via DRF fields).
    site = serializers.URLField(help_text=_('URL where your Zulip server is located.'))
    key = serializers.CharField(help_text=_("API key, which you can get through Zulip's web interface."))
    email = serializers.EmailField(help_text=_('The email address of the user who owns the API key mentioned above.'))
    insecure = serializers.BooleanField(default=True,
                                        help_text=_('Use insecure connection'))
@dispatcher_registry.register
class ZulipPrivate(CoreDispatcher):
    icon = '/bitcaster/images/icons/zulip.png'
    options_class = ZulipOptions
    subscription_class = ZulipSubscription
    message_class = ZulipMessage

    __help__ = _("""Zulip dispatcher to send private message
### Get API keys
- follow the [instructions](https://zulipchat.com/api/api-keys#get-a-bots-api-key) to get your keys
https://zulipchat.com/api/incoming-webhooks-overview#incoming-webhook-integrations
""")

    def _get_connection(self) -> ZulipClient:
        # Build a Zulip client from this dispatcher's stored configuration
        # (site URL, bot email and API key from ZulipOptions).
        config = self.config
        client = ZulipClient(email=config['email'],
                             api_key=config['key'],
                             site=config['site'])
        return client

    def emit(self, address, subject, message, connection=None, *args, **kwargs) -> str:
        # `subject` is accepted for interface compatibility but not used:
        # ZulipMessage.has_subject is False and private messages have no topic.
        try:
            conn = connection or self._get_connection()
            # Send a stream message
            request = {
                'type': 'private',
                'to': address,
                'content': message,
            }
            result = conn.send_message(request)
            if result['result'] != 'success':
                raise PluginSendError(result['msg'])
            self.logger.debug(f'{fqn(self)} sent to {address}')
            return address
        except Exception as e:
            # NOTE(review): all failures (including the PluginSendError raised
            # above) are logged and swallowed, so emit() returns None on error.
            # Confirm callers treat a falsy return value as "not delivered".
            self.logger.exception(e)

    def test_connection(self, raise_exception=False) -> bool:
        # Verify credentials by opening a session; optionally re-raise for
        # interactive configuration checks.
        try:
            conn = self._get_connection()
            conn.ensure_session()
            return True
        except Exception as e:
            self.logger.exception(e)
            if raise_exception:
                raise
            return False
| 33.511905 | 118 | 0.645471 |
c44e3771e8acdbf22be337978a9aca7694b49d0f | 2,094 | py | Python | Pytorch_1/dice_loss.py | sunshower76/Polyp-Segmentation | 764a62dd8de134e462939aafb0c1c24d67dfb564 | [
"MIT"
] | 2 | 2020-02-13T08:09:14.000Z | 2020-09-04T01:52:06.000Z | Pytorch_1/dice_loss.py | sunshower76/Polyp-Segmentation | 764a62dd8de134e462939aafb0c1c24d67dfb564 | [
"MIT"
] | null | null | null | Pytorch_1/dice_loss.py | sunshower76/Polyp-Segmentation | 764a62dd8de134e462939aafb0c1c24d67dfb564 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.autograd import Function
def binaryDiceLoss(pred, target, eps=1e-5):
    """Soft binary Dice loss, averaged over the batch.

    Args:
        pred: predicted mask, shape (B, 1, H, W) or (1, H, W). Values may be
            in [0, 1] or 0-255; 0-255 masks are rescaled to [0, 1].
        target: ground-truth mask, same shape convention as ``pred``.
        eps: smoothing term keeping the ratio defined for empty masks.

    Returns:
        Scalar tensor: mean of ``1 - dice`` over the batch.

    Raises:
        ValueError: if pred/target are not both 4-D or both 3-D.
    """
    # Normalize 0-255 encoded masks down to [0, 1].
    if torch.max(pred) > 1:
        pred = pred.contiguous() / 255
    else:
        pred = pred.contiguous()
    if torch.max(target) > 1:
        target = target.contiguous() / 255
    else:
        target = target.contiguous()

    # Dice is computed per sample over the H, W axes and only averaged at the
    # end: as the original author's note pointed out, summing numerators and
    # denominators across the batch first ((ab+cd)/(a+b+c+d)) is NOT the same
    # as averaging per-sample ratios.
    if len(pred.size()) == 4 and len(target.size()) == 4:  # batch: (B, C==1, H, W)
        intersection = (pred * target).sum(dim=2).sum(dim=2)  # sum of H, W axes
        loss = 1 - ((2. * intersection + eps) /
                    (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + eps))
        # loss shape: (batch_size, 1)
    elif len(pred.size()) == 3 and len(target.size()) == 3:  # single image: (C==1, H, W)
        intersection = (pred * target).sum(dim=1).sum(dim=1)
        # Bug fix: this branch previously assigned `coeff` instead of `loss`,
        # so `return loss.mean()` raised NameError for 3-D inputs; it also
        # omitted the eps smoothing from the numerator, unlike the 4-D branch.
        loss = 1 - ((2. * intersection + eps) /
                    (pred.sum(dim=1).sum(dim=1) + target.sum(dim=1).sum(dim=1) + eps))
    else:
        raise ValueError("pred and target must both be 4-D (B,1,H,W) or 3-D (1,H,W)")

    return loss.mean()  # mean of per-sample (1 - dice)
def binaryDiceCoeff(pred, target, eps=1e-5):
    """Binary Dice coefficient, averaged over the batch.

    Args:
        pred: predicted mask, shape (B, 1, H, W) or (1, H, W); 0-255 masks
            are rescaled to [0, 1].
        target: ground-truth mask, same shape convention as ``pred``.
        eps: smoothing term; makes two empty masks score 1.0 instead of 0/0.

    Returns:
        Scalar tensor: mean Dice coefficient over the batch.

    Raises:
        ValueError: if pred/target are not both 4-D or both 3-D.
    """
    # Normalize 0-255 encoded masks down to [0, 1].
    if torch.max(pred) > 1:
        pred = pred.contiguous() / 255
    else:
        pred = pred.contiguous()
    if torch.max(target) > 1:
        target = target.contiguous() / 255
    else:
        target = target.contiguous()

    if len(pred.size()) == 4 and len(target.size()) == 4:  # batch: (B, C==1, H, W)
        intersection = (pred * target).sum(dim=2).sum(dim=2)  # sum of H, W axes
        coeff = (2. * intersection + eps) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + eps)
    elif len(pred.size()) == 3 and len(target.size()) == 3:  # single image: (C==1, H, W)
        intersection = (pred * target).sum(dim=1).sum(dim=1)
        # Consistency fix: this branch previously dropped the eps smoothing
        # from the numerator, so two empty masks scored ~0 here but 1.0 in
        # the 4-D branch. Both branches now use identical smoothing.
        coeff = (2. * intersection + eps) / (pred.sum(dim=1).sum(dim=1) + target.sum(dim=1).sum(dim=1) + eps)
    else:
        raise ValueError("pred and target must both be 4-D (B,1,H,W) or 3-D (1,H,W)")

    return coeff.mean()
94c7aded3b5e5ac4d68a5c3bef02b9516a8ab8f0 | 989 | py | Python | tasks/kernels/container.py | faasm/experiment-lammps | 48c8802b1673b3cfc83d875e93660ef3fdd796a3 | [
"Apache-2.0"
] | null | null | null | tasks/kernels/container.py | faasm/experiment-lammps | 48c8802b1673b3cfc83d875e93660ef3fdd796a3 | [
"Apache-2.0"
] | 4 | 2020-12-07T08:06:40.000Z | 2021-04-05T08:12:10.000Z | tasks/kernels/container.py | faasm/experiment-lammps | 48c8802b1673b3cfc83d875e93660ef3fdd796a3 | [
"Apache-2.0"
] | null | null | null | from invoke import task
from os import environ
from copy import copy
from subprocess import run
from tasks.util.env import get_docker_tag, push_docker_image, PROJ_ROOT
from tasks.kernels.env import KERNELS_IMAGE_NAME, KERNELS_DOCKERFILE
@task(default=True)
def build(ctx, nocache=False, push=False):
    """
    Build the container image used for kernels experiment.

    :param ctx: invoke context (unused)
    :param nocache: if True, pass --no-cache to docker build
    :param push: if True, push the built image after a successful build
    """
    # Enable BuildKit for the build. The original code constructed this
    # environment but never passed it to run(), so the setting had no effect.
    shell_env = copy(environ)
    shell_env["DOCKER_BUILDKIT"] = "1"

    img_tag = get_docker_tag(KERNELS_IMAGE_NAME)

    cmd = [
        "docker",
        "build",
        "-f {}".format(KERNELS_DOCKERFILE),
        "--no-cache" if nocache else "",
        "-t {}".format(img_tag),
        ".",
    ]
    # Drop the empty placeholder left behind when --no-cache is not requested.
    cmd_str = " ".join(part for part in cmd if part)
    print(cmd_str)

    run(cmd_str, shell=True, check=True, cwd=PROJ_ROOT, env=shell_env)

    if push:
        push_docker_image(img_tag)
@task
def push(ctx):
    """
    Push the kernels container image to the registry.
    """
    push_docker_image(get_docker_tag(KERNELS_IMAGE_NAME))
| 23 | 71 | 0.661274 |
bac432f156151e86911d79959b8be5c503cb98cf | 11,748 | py | Python | test/python/topology/test1.py | Jaimie-Jin1/streamsx.topology | 6f316ec8e9ed1349c6f061d9bb7d03deb87e3d08 | [
"Apache-2.0"
] | null | null | null | test/python/topology/test1.py | Jaimie-Jin1/streamsx.topology | 6f316ec8e9ed1349c6f061d9bb7d03deb87e3d08 | [
"Apache-2.0"
] | null | null | null | test/python/topology/test1.py | Jaimie-Jin1/streamsx.topology | 6f316ec8e9ed1349c6f061d9bb7d03deb87e3d08 | [
"Apache-2.0"
] | null | null | null | # Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
import unittest
import sys
import test_functions
from test_utilities import standalone
from streamsx.topology.topology import *
from streamsx.topology import schema
import streamsx.topology.context
class TestTopologyMethods(unittest.TestCase):
    """Functional tests for the core streamsx Topology API.

    Most tests build a small topology and execute it via the ``standalone``
    helper, relying on the checker callables in ``test_functions`` to verify
    the tuples that flow through the graph.
    """
    def test_TopologyName(self):
        topo = Topology("test_TopologyNameExplicit")
        self.assertEqual("test_TopologyNameExplicit", topo.name)
        self.assertEqual("test1", topo.namespace)
    def test_TopologyNoName(self):
        # When no name is given the topology is named after the calling function.
        topo = Topology()
        self.assertEqual("test_TopologyNoName", topo.name)
        self.assertEqual("test1", topo.namespace)
    def test_TopologyNamespace(self):
        topo = Topology(namespace="myns")
        self.assertEqual("test_TopologyNamespace", topo.name)
        self.assertEqual("myns", topo.namespace)
    def test_TopologyNameNamespace(self):
        topo = Topology(name="myapp", namespace="myns")
        self.assertEqual("myapp", topo.name)
        self.assertEqual("myns", topo.namespace)
    def test_empty(self):
        # Submitting a topology with no streams must be rejected.
        topo = Topology(name="what_no_streams")
        self.assertRaises(ValueError, streamsx.topology.context.submit, "TOOLKIT", topo)
    def test_TopologySourceAndSink(self):
        topo = Topology("test_TopologySourceAndSink")
        hw = topo.source(test_functions.hello_world)
        hw.sink(test_functions.check_hello_world)
        standalone(self, topo)
    def test_TopologyFilter(self):
        topo = Topology("test_TopologyFilter")
        hw = topo.source(test_functions.hello_world)
        hwf = hw.filter(test_functions.filter)
        hwf.sink(test_functions.check_hello_world_filter)
        standalone(self, topo)
    def test_TopologyLengthFilter(self):
        # Filtering with a stateful callable instance rather than a function.
        topo = Topology("test_TopologyLengthFilter")
        hw = topo.source(test_functions.strings_length_filter)
        hwf = hw.filter(test_functions.LengthFilter(5))
        hwf.sink(test_functions.check_strings_length_filter)
        standalone(self, topo)
    def test_TopologyIsolate(self):
        topo = Topology("test_TopologyIsolate")
        hw = topo.source(test_functions.hello_world)
        iso = hw.isolate()
        iso.sink(test_functions.check_hello_world)
        standalone(self, topo)
    def test_TopologyIsolatedFilter(self):
        topo = Topology("test_TopologyIsolatedFilter")
        hw = topo.source(test_functions.hello_world)
        iso1 = hw.isolate()
        hwf = iso1.filter(test_functions.filter)
        iso2 = hwf.isolate()
        iso2.sink(test_functions.check_hello_world_filter)
        standalone(self, topo)
        # switch this to BUNDLE to create a sab file that can
        # be submitted to a streams instance and run as 3 PEs
        # streamsx.topology.context.submit("BUNDLE", topo.graph)
    def test_TopologyLowLatency(self):
        topo = Topology("test_TopologyLowLatency")
        hw = topo.source(test_functions.hello_world)
        low1 = hw.low_latency()
        hwf1 = low1.filter(test_functions.filter)
        hwf2 = hwf1.filter(test_functions.filter)
        elow1 = hwf2.end_low_latency()
        hwf3 = elow1.filter(test_functions.filter)
        hwf3.sink(test_functions.check_hello_world_filter)
        standalone(self, topo)
        streamsx.topology.context.submit("BUNDLE", topo.graph)
    def test_TopologyStringSubscribe(self):
        # Only builds the graph; nothing is executed here.
        topo = Topology("test_TopologyStringSubscribe")
        hw = topo.subscribe("python.test.topic1", schema.CommonSchema.String)
        hw.sink(test_functions.check_hello_world)
        #streamsx.topology.context.submit("BUNDLE", topo.graph)
    def test_TopologyTransform(self):
        topo = Topology("test_TopologyTransform")
        source = topo.source(test_functions.int_strings_transform)
        i1 = source.transform(int)
        i2 = i1.transform(test_functions.add17)
        i2.sink(test_functions.check_int_strings_transform)
        standalone(self, topo)
    def test_TopologyTransformWithDrop(self):
        # Transform callables that return None drop the tuple.
        topo = Topology("test_TopologyTransformWithDrop")
        source = topo.source(test_functions.int_strings_transform_with_drop)
        i1 = source.map(test_functions.string_to_int_except68)
        i2 = i1.map(test_functions.add17)
        i2.sink(test_functions.check_int_strings_transform_with_drop)
        standalone(self, topo)
    def test_TopologyMultiTransform(self):
        topo = Topology("test_TopologyMultiTransform")
        source = topo.source(test_functions.strings_multi_transform)
        i1 = source.multi_transform(test_functions.split_words)
        i1.sink(test_functions.check_strings_multi_transform)
        standalone(self, topo)
    def test_TopologyTransformCallableAddWithDrop(self):
        topo = Topology("test_TopologyTransformCallableAddWithDrop")
        source = topo.source(test_functions.int_strings_transform_with_drop)
        i1 = source.transform(test_functions.string_to_int_except68)
        i2 = i1.transform(test_functions.AddNum(17))
        i2.sink(test_functions.check_int_strings_transform_with_drop)
        standalone(self, topo)
    def test_TopologyMultiTransformCallableIncMaxSplit(self):
        topo = Topology("test_TopologyMultiTransformCallableIncMaxSplit")
        source = topo.source(test_functions.strings_multi_transform)
        i1 = source.flat_map(test_functions.IncMaxSplitWords(1))
        i1.sink(test_functions.check_strings_multi_transform_inc_max_split)
        standalone(self, topo)
    def test_TopologySourceAndSinkCallable(self):
        topo = Topology("test_TopologySourceAndSinkCallable")
        hw = topo.source(test_functions.SourceTuplesAppendIndex(["a", "b", "c", "d"]))
        hw.sink(test_functions.CheckTuples(["a0", "b1", "c2", "d3"]))
        standalone(self, topo)
    def test_TopologyParallel(self):
        topo = Topology("test_TopologyParallel")
        hw = topo.source(test_functions.seedSource)
        hwp = hw.parallel(4)
        hwf = hwp.transform(test_functions.ProgramedSeed())
        hwef = hwf.end_parallel()
        hwef.sink(test_functions.SeedSinkRR())
        standalone(self, topo)
    def test_TopologyHashedFuncParallel(self):
        # Hash-partitioned parallel region with an explicit hash function.
        topo = Topology("test_TopologyHashedFuncParallel")
        hw = topo.source(test_functions.seedSource)
        hwp = hw.parallel(4,Routing.HASH_PARTITIONED,test_functions.produceHash)
        hwf = hwp.transform(test_functions.ProgramedSeed())
        hwef = hwf.end_parallel()
        hwef.sink(test_functions.SeedSinkHashOrKey())
        standalone(self, topo)
    def test_TopologyHashedParallel(self):
        # Hash-partitioned parallel region using the default hashing.
        topo = Topology("test_TopologyHashedParallel")
        hw = topo.source(test_functions.seedSource)
        hwp = hw.parallel(4,Routing.HASH_PARTITIONED)
        hwf = hwp.transform(test_functions.ProgramedSeed())
        hwef = hwf.end_parallel()
        hwef.sink(test_functions.SeedSinkHashOrKey())
        standalone(self, topo)
    def test_TopologyUnion(self):
        topo = Topology("test_TopologyUnion")
        h = topo.source(test_functions.hello)
        b = topo.source(test_functions.beautiful)
        c = topo.source(test_functions.crazy)
        w = topo.source(test_functions.world)
        streamSet = {h, w, b, c}
        hwu = h.union(streamSet)
        hwu.sink(test_functions.check_union_hello_world)
        standalone(self, topo)
    def test_TopologyParallelUnion(self):
        topo = Topology("test_TopologyParallelUnion")
        hw = topo.source(test_functions.seedSource)
        hwp = hw.parallel(4)
        hwf = hwp.transform(test_functions.ProgramedSeed())
        hwf2 = hwp.transform(test_functions.ProgramedSeed())
        streamSet = {hwf2}
        hwu = hwf.union(streamSet)
        hwup = hwu.end_parallel()
        hwup.sink(test_functions.SeedSinkRRPU())
        standalone(self, topo)
    # test using input functions from a regular package that has __init__.py
    # test using input functions that are fully qualified
    def test_TopologyImportPackage(self):
        import test_package.test_subpackage.test_module
        try:
            topo = Topology("test_TopologyImportPackage")
            hw = topo.source(test_package.test_subpackage.test_module.SourceTuples(["Hello", "World!"]))
            hwf = hw.filter(test_package.test_subpackage.test_module.filter)
            hwf.sink(test_package.test_subpackage.test_module.CheckTuples(["Hello"]))
            standalone(self, topo)
        finally:
            pass
    # test using input functions from an implicit namespace package that doesn't have a __init__.py
    # test using input functions that are qualified using a module alias e.g. 'test_ns_module'
    # test using input functions from a mix of packages and individual modules
    def test_TopologyImportNamespacePackage(self):
        from test_namespace_package.test_subpackage import test_module as test_ns_module
        try:
            topo = Topology("test_TopologyImportNamespacePackage")
            hw = topo.source(test_ns_module.SourceTuples(["Hello", "World!"]))
            hwf = hw.filter(test_functions.filter)
            hwf.sink(test_ns_module.CheckTuples(["World!"]))
            standalone(self, topo)
        finally:
            del test_ns_module
    # test using input functions from a namespace package that merges separate packages into a
    # common namespace
    def test_TopologyImportCommonNamespacePackage(self):
        this_dir = os.path.dirname(os.path.realpath(__file__))
        tcn = os.path.join(this_dir, 'test_common_namespace')
        tcn_paths = [os.path.join(tcn, 'package1'), os.path.join(tcn,'package2')]
        sys.path.extend(tcn_paths)
        import common_namespace.module1
        import common_namespace.module2
        try:
            topo = Topology("test_TopologyImportCommonNamespacePackage")
            hw = topo.source(common_namespace.module1.SourceTuples(["Hello", "World!"]))
            hwf = hw.filter(common_namespace.module2.filter)
            hwf.sink(common_namespace.module2.CheckTuples(["World!"]))
            standalone(self, topo)
        finally:
            # Undo the sys.path manipulation so other tests are unaffected.
            for p in tcn_paths:
                sys.path.remove(p)
            del common_namespace.module1, common_namespace.module2
    # test using input functions from a module that imports another module
    def test_TopologyImportModuleWithDependencies(self):
        import test_functions2
        try:
            topo = Topology("test_TopologyImportModuleWithDependencies")
            hw = topo.source(test_functions2.hello_world)
            hwf = hw.filter(test_functions2.filter)
            hwf.sink(test_functions2.check_hello_world_filter)
            standalone(self, topo)
        finally:
            del test_functions2
class TestPlaceable(unittest.TestCase):
    """Tests for the resource-tag (placement) API on stream objects."""

    def test_placeable(self):
        """Streams from source/filter/map/flat_map expose mutable, independent tags."""
        topo = Topology()
        src = topo.source([])
        self.assertFalse(src.resource_tags)
        self.assertIsInstance(src.resource_tags, set)
        src.resource_tags.add('ingest')
        src.resource_tags.add('db')
        self.assertEqual({'ingest', 'db'}, src.resource_tags)

        filtered = src.filter(lambda x: True)
        filtered.resource_tags.add('cpu1')
        self.assertEqual({'cpu1'}, filtered.resource_tags)

        mapped = src.map(lambda x: x)
        mapped.resource_tags.add('cpu2')
        self.assertEqual({'cpu2'}, mapped.resource_tags)

        flat = src.flat_map(lambda x: [x])
        flat.resource_tags.add('cpu3')
        self.assertEqual({'cpu3'}, flat.resource_tags)

        # Tags must remain independent per stream.
        self.assertEqual({'ingest', 'db'}, src.resource_tags)
        self.assertEqual({'cpu1'}, filtered.resource_tags)
        self.assertEqual({'cpu2'}, mapped.resource_tags)
        self.assertEqual({'cpu3'}, flat.resource_tags)

    def test_not_placeable(self):
        """Placement-related transforms yield streams whose tags are frozen."""
        topo = Topology()
        left = topo.source([])
        right = topo.source([])
        for stream in (left.union({right}),
                       left.autonomous(),
                       left.isolate(),
                       left.parallel(3),
                       left.low_latency()):
            self._check_not_placeable(stream)

    def _check_not_placeable(self, s):
        # Non-placeable streams report an empty, immutable tag set.
        self.assertFalse(s.resource_tags)
        self.assertIsInstance(s.resource_tags, frozenset)
| 40.095563 | 102 | 0.714164 |
288fb21245d18286e29133df0c13cc2a2ab48799 | 195 | py | Python | manage.py | silv3rmat/django-teryt-tree | 1dafe82f308dfef375f92d2156c5ebc5b5216c5f | [
"BSD-3-Clause"
] | 31 | 2016-06-01T17:42:18.000Z | 2022-02-09T20:58:01.000Z | manage.py | silv3rmat/django-teryt-tree | 1dafe82f308dfef375f92d2156c5ebc5b5216c5f | [
"BSD-3-Clause"
] | 15 | 2016-06-01T21:19:12.000Z | 2021-12-15T13:48:33.000Z | manage.py | silv3rmat/django-teryt-tree | 1dafe82f308dfef375f92d2156c5ebc5b5216c5f | [
"BSD-3-Clause"
] | 13 | 2016-08-31T08:00:33.000Z | 2021-12-15T12:08:08.000Z | #!/usr/bin/env python
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 21.666667 | 55 | 0.769231 |
8a0766721f07ddce0986a17470bce9a685b5b1c5 | 2,491 | py | Python | recipes/Python/286204_Case_Insensitive_Sort/recipe-286204.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/286204_Case_Insensitive_Sort/recipe-286204.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/286204_Case_Insensitive_Sort/recipe-286204.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # 06-07-04
#v1.0.0
# caselesssort.py
# A sort function for lists of strings that is case insensitive.
# Copyright Michael Foord
# You are free to modify, use and relicense this code.
# No warranty express or implied for the accuracy, fitness to purpose or otherwise for this code....
# Use at your own risk !!!
# E-mail michael AT foord DOT me DOT uk
# Maintained at www.voidspace.org.uk/atlantibots/pythonutils.html
"""
The built in sort method for lists is case sensitive.
This means it can be unsuitable for sorting some lists of strings.
e.g. ['Apple', 'Pear', 'apple'].sort()
leaves 'Apple' and 'apple' at opposite ends of the list.
You can pass in a function to the sort method - but this can be very slow.
cSort still uses the sort method, so there isn't much performance hit, but it is caseless.
cSort can handle non-string members in the list without failing.
In addition cSort will sort any sets of entries for which entry1.lower() == entry2.lower()
i.e. cSort(['fish', 'FISH', 'fIsh'])
returns ['FISH', 'fIsh', 'fish']
You can turn this behaviour off by passing cSort an optional 'False' parameter.
i.e. cSort(['fish', 'FISH', 'fIsh'], False)
returns ['fish', 'FISH', 'fIsh']
"""
def cSort(inlist, minisort=True):
    """Return the entries of inlist sorted case-insensitively.

    Entries whose lowercased forms are equal are grouped together; within
    such a group the entries are sorted case-sensitively unless minisort
    is False, in which case their original order is preserved.

    Non-string entries (no .lower() method) are sorted by their own value
    alongside the lowercased keys and passed through unchanged.
    """
    sortlist = []
    newlist = []
    sortdict = {}
    for entry in inlist:
        try:
            lentry = entry.lower()
        except AttributeError:
            # Non-string entry: sort it by its own value.
            # (Previously this appended the stale 'lentry' from an earlier
            # iteration - a NameError on the first entry and wrong output later.)
            sortlist.append(entry)
        else:
            try:
                sortdict[lentry].append(entry)
            except KeyError:
                # Only the first occurrence of a lowercased key goes into
                # sortlist; appending on every occurrence duplicated whole
                # groups in the output.
                sortdict[lentry] = [entry]
                sortlist.append(lentry)
    sortlist.sort()
    for entry in sortlist:
        try:
            thislist = sortdict[entry]
            if minisort: thislist.sort()
            newlist = newlist + thislist
        except KeyError:
            # Non-string entry, passed straight through.
            newlist.append(entry)
    return newlist
if __name__ == '__main__':
    # Demonstration comparing the built-in list.sort() with cSort().
    # print() with a single argument is valid in both Python 2 and 3;
    # the bare Python 2 print statements made the module unimportable
    # under Python 3.
    list1 = ['pish', 'fish', 'FISH', 'Fish', 'PISH', 'FIsh', 'fiSH', 'Pish','piSH']
    list2 = list(list1)
    print('Here is an unsorted list :')
    print(list1)
    list1.sort()
    print('Here is a list sorted using list.sort() :')
    print(list1)
    print('Here is the list sorted using cSort(list) :')
    print(cSort(list2))
    print('Here is the list sorted using cSort(list, False) :')
    print(cSort(list2, False))
"""
TODO/ISSUES
CHANGELOG
06-07-04 Version 1.0.0
A working caseless sort.
Will be part of the caseless module, but also stands on its own.
"""
| 29.305882 | 100 | 0.643517 |
12aa0f223efb47e74ece384b5d7a7dc71fe5713e | 11,812 | py | Python | tests/fpga/memory_buffering_test.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | [
"BSD-3-Clause"
] | 1 | 2022-03-11T13:36:34.000Z | 2022-03-11T13:36:34.000Z | tests/fpga/memory_buffering_test.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | [
"BSD-3-Clause"
] | null | null | null | tests/fpga/memory_buffering_test.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
"""
Tests memory buffering in an FPGA SDFG, where memory is read and written using
512-bit wide accesses, and converted (using a "gearbox") to/from the vector
width used by the computational kernel.
Unfortunately this doesn't currently work for Intel, since Intel does not
support vectors of vectors in kernel code.
"""
import dace
from dace.fpga_testing import fpga_test, xilinx_test
from dace.libraries.standard import Gearbox
from dace.transformation.interstate import FPGATransformSDFG, InlineSDFG
import numpy as np
dtype = dace.float32
mem_width = 64 // dtype.bytes
n = dace.symbol("n")
def run_program(sdfg: dace.SDFG):
    """Execute the given SDFG on a small all-ones input and verify the +1 result."""
    count = 16 * mem_width
    src = np.ones((count, ), dtype.type)
    dst = np.empty((count, ), dtype.type)
    sdfg(input_array_host=src, output_array_host=dst, n=count)
    # The kernel adds 1 to every element.
    assert all(dst == src + 1)
def memory_buffering(vec_width, use_library_node, elementwise):
    """Build, run and return an SDFG that streams data through gearboxes.

    Pipeline: global memory -> read -> gearbox (wide-to-narrow) ->
    compute (+1) -> gearbox (narrow-to-wide) -> write -> global memory.

    :param vec_width: vector width used by the compute kernel.
    :param use_library_node: use the Gearbox library node instead of a
        hand-written gearbox tasklet.
    :param elementwise: represent memory as a flat vector of scalars rather
        than as a vector of kernel-width vectors.
    """
    gear_factor = mem_width // vec_width
    kernel_type = dace.vector(dtype, vec_width)
    if elementwise:
        memory_type = dace.vector(dtype, mem_width)
    else:
        memory_type = dace.vector(kernel_type, gear_factor)
    sdfg = dace.SDFG("memory_buffering_library_node")
    state = sdfg.add_state("memory_buffering_library_node")
    sdfg.add_array("input_array", (n / mem_width, ), memory_type, transient=True, storage=dace.StorageType.FPGA_Global)
    sdfg.add_array("output_array", (n / mem_width, ), memory_type, transient=True, storage=dace.StorageType.FPGA_Global)
    sdfg.add_stream("read_to_gearbox", memory_type, transient=True, storage=dace.StorageType.FPGA_Local)
    sdfg.add_stream("gearbox_to_kernel", kernel_type, transient=True, storage=dace.StorageType.FPGA_Local)
    sdfg.add_stream("kernel_to_gearbox", kernel_type, transient=True, storage=dace.StorageType.FPGA_Local)
    sdfg.add_stream("gearbox_to_write", memory_type, transient=True, storage=dace.StorageType.FPGA_Local)
    # Read from memory
    memory_read = state.add_read("input_array")
    read_to_gearbox_write = state.add_write("read_to_gearbox")
    read_entry, read_exit = state.add_map("read", {"i": f"0:n/{mem_width}"}, schedule=dace.ScheduleType.FPGA_Device)
    read_tasklet = state.add_tasklet("read", {"mem"}, {"to_gearbox"}, "to_gearbox = mem")
    state.add_memlet_path(memory_read, read_entry, read_tasklet, dst_conn="mem", memlet=dace.Memlet(f"input_array[i]"))
    state.add_memlet_path(read_tasklet,
                          read_exit,
                          read_to_gearbox_write,
                          src_conn="to_gearbox",
                          memlet=dace.Memlet(f"read_to_gearbox[0]"))
    # Gearbox input (wide memory words -> kernel-width vectors)
    read_to_gearbox_read = state.add_read("read_to_gearbox")
    gearbox_to_kernel_write = state.add_write("gearbox_to_kernel")
    if use_library_node:
        read_gearbox = Gearbox(n / mem_width, name="read_gearbox")
        state.add_node(read_gearbox)
        state.add_memlet_path(read_to_gearbox_read,
                              read_gearbox,
                              dst_conn="from_memory",
                              memlet=dace.Memlet("read_to_gearbox[0]", volume=n / mem_width))
        state.add_memlet_path(read_gearbox,
                              gearbox_to_kernel_write,
                              src_conn="to_kernel",
                              memlet=dace.Memlet("gearbox_to_kernel[0]", volume=n / vec_width))
    else:
        # Manual gearbox: pop a wide word every gear_factor iterations and
        # emit one narrow vector per iteration from the buffered word.
        sdfg.add_array("read_buffer", (1, ), memory_type, storage=dace.StorageType.FPGA_Local, transient=True)
        read_buffer_read = state.add_read("read_buffer")
        read_buffer_write = state.add_write("read_buffer")
        read_gearbox_entry, read_gearbox_exit = state.add_map("gearbox_read", {
            "i": f"0:n/{mem_width}",
            "j": f"0:{gear_factor}"
        },
                                                              schedule=dace.ScheduleType.FPGA_Device)
        read_gearbox_tasklet = state.add_tasklet(
            "gearbox_read", {
                "from_memory": memory_type,
                "buffer_in": None
            }, {"to_kernel", "buffer_out"}, """\
wide = from_memory if j == 0 else buffer_in
to_kernel = wide[j]
buffer_out = wide""")
        state.add_memlet_path(read_to_gearbox_read,
                              read_gearbox_entry,
                              read_gearbox_tasklet,
                              dst_conn="from_memory",
                              memlet=dace.Memlet("read_to_gearbox[0]", dynamic=True))
        state.add_memlet_path(read_buffer_read,
                              read_gearbox_entry,
                              read_gearbox_tasklet,
                              dst_conn="buffer_in",
                              memlet=dace.Memlet("read_buffer[0]"))
        state.add_memlet_path(read_gearbox_tasklet,
                              read_gearbox_exit,
                              gearbox_to_kernel_write,
                              src_conn="to_kernel",
                              memlet=dace.Memlet("gearbox_to_kernel[0]"))
        state.add_memlet_path(read_gearbox_tasklet,
                              read_gearbox_exit,
                              read_buffer_write,
                              src_conn="buffer_out",
                              memlet=dace.Memlet("read_buffer[0]"))
    # Some fictional compute
    gearbox_to_kernel_read = state.add_read("gearbox_to_kernel")
    kernel_to_gearbox_write = state.add_write("kernel_to_gearbox")
    compute_entry, compute_exit = state.add_map("compute", {"i": f"0:n/{vec_width}"},
                                                schedule=dace.ScheduleType.FPGA_Device)
    compute_tasklet = state.add_tasklet("compute", {"val_in"}, {"val_out"}, "val_out = val_in + 1")
    state.add_memlet_path(gearbox_to_kernel_read,
                          compute_entry,
                          compute_tasklet,
                          dst_conn="val_in",
                          memlet=dace.Memlet("gearbox_to_kernel[0]"))
    state.add_memlet_path(compute_tasklet,
                          compute_exit,
                          kernel_to_gearbox_write,
                          src_conn="val_out",
                          memlet=dace.Memlet("kernel_to_gearbox[0]"))
    # Gearbox output (kernel-width vectors -> wide memory words)
    # NOTE(review): add_write/add_read look swapped here relative to the
    # analogous pairs above (the consumed stream uses add_write and the
    # produced stream uses add_read). Both create access nodes, so this
    # appears harmless -- confirm before changing.
    kernel_to_gearbox_read = state.add_write("kernel_to_gearbox")
    gearbox_to_write_write = state.add_read("gearbox_to_write")
    if use_library_node:
        write_gearbox = Gearbox(n / mem_width, name="write_gearbox")
        state.add_node(write_gearbox)
        state.add_memlet_path(kernel_to_gearbox_read,
                              write_gearbox,
                              dst_conn="from_kernel",
                              memlet=dace.Memlet("kernel_to_gearbox[0]", volume=n / vec_width))
        state.add_memlet_path(write_gearbox,
                              gearbox_to_write_write,
                              src_conn="to_memory",
                              memlet=dace.Memlet("gearbox_to_write[0]", volume=n / mem_width))
    else:
        # Manual gearbox: accumulate gear_factor narrow vectors into a wide
        # word and push it once per group.
        sdfg.add_array("write_buffer", (1, ), memory_type, storage=dace.StorageType.FPGA_Local, transient=True)
        write_buffer_read = state.add_read("write_buffer")
        write_buffer_write = state.add_write("write_buffer")
        write_gearbox_entry, write_gearbox_exit = state.add_map("gearbox_write", {
            "i": f"0:n/{mem_width}",
            "j": f"0:{gear_factor}"
        },
                                                                schedule=dace.ScheduleType.FPGA_Device)
        write_gearbox_tasklet = state.add_tasklet(
            "gearbox_write", {"from_kernel", "buffer_in"}, {"to_memory", "buffer_out"}, f"""\
wide = buffer_in
wide[j] = from_kernel
if j == {gear_factor} - 1:
    to_memory = wide
buffer_out = wide""")
        state.add_memlet_path(kernel_to_gearbox_read,
                              write_gearbox_entry,
                              write_gearbox_tasklet,
                              dst_conn="from_kernel",
                              memlet=dace.Memlet("kernel_to_gearbox[0]"))
        state.add_memlet_path(write_buffer_read,
                              write_gearbox_entry,
                              write_gearbox_tasklet,
                              dst_conn="buffer_in",
                              memlet=dace.Memlet("write_buffer[0]"))
        state.add_memlet_path(write_gearbox_tasklet,
                              write_gearbox_exit,
                              gearbox_to_write_write,
                              src_conn="to_memory",
                              memlet=dace.Memlet("gearbox_to_write[0]", dynamic=True))
        state.add_memlet_path(write_gearbox_tasklet,
                              write_gearbox_exit,
                              write_buffer_write,
                              src_conn="buffer_out",
                              memlet=dace.Memlet("write_buffer[0]"))
    # Write memory
    gearbox_to_write_read = state.add_read("gearbox_to_write")
    memory_write = state.add_write("output_array")
    write_entry, write_exit = state.add_map("write", {"i": f"0:n/{mem_width}"}, schedule=dace.ScheduleType.FPGA_Device)
    write_tasklet = state.add_tasklet("write", {"from_gearbox"}, {"mem"}, "mem = from_gearbox")
    state.add_memlet_path(gearbox_to_write_read,
                          write_entry,
                          write_tasklet,
                          dst_conn="from_gearbox",
                          memlet=dace.Memlet("gearbox_to_write[0]"))
    state.add_memlet_path(write_tasklet,
                          write_exit,
                          memory_write,
                          src_conn="mem",
                          memlet=dace.Memlet("output_array[i]"))
    # Copy data to the FPGA
    sdfg.add_array("input_array_host", (n, ), dtype)
    pre_state = sdfg.add_state("host_to_device")
    host_to_device_read = pre_state.add_read("input_array_host")
    host_to_device_write = pre_state.add_write("input_array")
    pre_state.add_memlet_path(host_to_device_read,
                              host_to_device_write,
                              memlet=dace.Memlet(f"input_array[0:n/{mem_width}]"))
    # Copy data back to the host
    sdfg.add_array("output_array_host", (n, ), dtype)
    post_state = sdfg.add_state("device_to_host")
    device_to_host_read = post_state.add_read("output_array")
    device_to_host_write = post_state.add_write("output_array_host")
    post_state.add_memlet_path(device_to_host_read,
                              device_to_host_write,
                              memlet=dace.Memlet(f"output_array[0:n/{mem_width}]"))
    # Link states
    sdfg.add_edge(pre_state, state, dace.InterstateEdge())
    sdfg.add_edge(state, post_state, dace.InterstateEdge())
    run_program(sdfg)
    return sdfg
# Test entry points: memory_buffering(vec_width, use_library_node, elementwise).
# The manual-gearbox and vector-of-vector variants are Xilinx-only (Intel does
# not support vectors of vectors in kernel code, per the module docstring);
# the elementwise library-node variants run on any FPGA target.
@xilinx_test()
def test_memory_buffering_manual():
    return memory_buffering(4, False, False)
@xilinx_test()
def test_memory_buffering_manual_scalar():
    return memory_buffering(1, False, False)
@xilinx_test()
def test_memory_buffering_library_node():
    return memory_buffering(4, True, False)
@xilinx_test()
def test_memory_buffering_library_node_scalar():
    return memory_buffering(1, True, False)
@fpga_test()
def test_memory_buffering_library_node_elementwise():
    return memory_buffering(4, True, True)
@fpga_test()
def test_memory_buffering_library_node_elementwise_scalar():
    return memory_buffering(1, True, True)
if __name__ == "__main__":
    # NOTE(review): the manual_scalar and elementwise_scalar variants are not
    # invoked here -- possibly intentional, confirm before adding them.
    test_memory_buffering_manual(None)
    test_memory_buffering_library_node(None)
    test_memory_buffering_library_node_scalar(None)
    test_memory_buffering_library_node_elementwise(None)
| 45.961089 | 120 | 0.612936 |
361438d26816c5c48486fb57e5cc7627ef531a3a | 8,592 | py | Python | pypybox2d/joints/friction.py | the-mba/Progra-Super-Mario | 90dc2a4ba815732b6e92652c7f8bb4a345d25e91 | [
"MIT"
] | null | null | null | pypybox2d/joints/friction.py | the-mba/Progra-Super-Mario | 90dc2a4ba815732b6e92652c7f8bb4a345d25e91 | [
"MIT"
] | null | null | null | pypybox2d/joints/friction.py | the-mba/Progra-Super-Mario | 90dc2a4ba815732b6e92652c7f8bb4a345d25e91 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2011 Erin Catto http://www.box2d.org
# Python port by Ken Lauer / http://pybox2d.googlecode.com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from __future__ import absolute_import
__all__ = ('FrictionJoint', )
__version__ = "$Revision: 353 $"
__date__ = "$Date: 2011-07-15 17:13:40 -0400 (Fri, 15 Jul 2011) $"
# $Source$
from ..common import (Vec2, Mat22, scalar_cross, clamp, is_valid_float, property)
from .joint import Joint
class FrictionJoint(Joint):
    """
    Friction joint. This is used for top-down friction.
    It provides 2D translational friction and angular friction.
    """
    # Point-to-point constraint
    # Cdot = vb - va
    #      = vb + cross(wb, rb) - va - cross(wa, ra)
    # J = [-I -ra_skew I rb_skew ]
    # Identity used:
    # w k % (rx i + ry j) = w * (-ry i + rx j)

    # Angle constraint
    # Cdot = wb - wa
    # J = [0 0 -1 0 0 1]
    # K = invIa + invIb
    def __init__(self, body_a, body_b, local_anchor_a=(0,0), local_anchor_b=(0,0),
                 max_force=0.0, max_torque=0.0, collide_connected=False):
        """
        :param body_a: first attached body
        :param body_b: second attached body
        :param local_anchor_a: anchor point in body_a's local coordinates
        :param local_anchor_b: anchor point in body_b's local coordinates
        :param max_force: maximum friction force in N
        :param max_torque: maximum friction torque in N*m
        :param collide_connected: whether the attached bodies may collide
        :raises ValueError: if either body is None
        """
        if body_a is None or body_b is None:
            raise ValueError('body_a and body_b must be set')
        Joint.__init__(self, body_a, body_b, collide_connected)

        self._local_anchor_a = Vec2(*local_anchor_a)
        self._local_anchor_b = Vec2(*local_anchor_b)
        self._linear_impulse = Vec2()
        self._angular_impulse = 0.0
        self._max_force = max_force
        self._max_torque = max_torque

    def __copy__(self):
        return FrictionJoint(self._body_a, self._body_b, self._local_anchor_a,
                             self._local_anchor_b, self._max_force, self._max_torque,
                             self._collide_connected)

    def get_reaction_force(self, inv_dt):
        """Get the reaction force on body_b at the joint anchor in Newtons."""
        return inv_dt * self._linear_impulse

    def get_reaction_torque(self, inv_dt):
        """Get the reaction torque on body_b in N*m."""
        return inv_dt * self._angular_impulse

    @property
    def anchor_a(self):
        """Get the anchor point on body_a in world coordinates"""
        return self._body_a.get_world_point(self._local_anchor_a)

    @property
    def anchor_b(self):
        """Get the anchor point on body_b in world coordinates"""
        return self._body_b.get_world_point(self._local_anchor_b)

    @property
    def max_force(self):
        """The maximum friction force in N."""
        return self._max_force

    @max_force.setter
    def max_force(self, max_force):
        if not is_valid_float(max_force) or max_force < 0.0:
            raise ValueError('Max force must be >= 0.0')

        self._max_force = max_force

    @property
    def max_torque(self):
        """The maximum friction torque in N*m."""
        return self._max_torque

    @max_torque.setter
    def max_torque(self, max_torque):
        if not is_valid_float(max_torque) or max_torque < 0.0:
            raise ValueError('Max torque must be >= 0.0')
        self._max_torque = max_torque

    def _init_velocity_constraints(self, step, positions, velocities):
        """Pre-compute the effective mass matrices and warm-start the solver."""
        body_a, body_b = self._body_a, self._body_b
        index_a = body_a._island_index
        index_b = body_b._island_index
        self._indices = (index_a, index_b)

        local_center_a = self._local_center_a = body_a._sweep.local_center
        local_center_b = self._local_center_b = body_b._sweep.local_center
        inv_mass_a = self._inv_mass_a = body_a._inv_mass
        inv_mass_b = self._inv_mass_b = body_b._inv_mass
        inv_Ia = self._inv_Ia = body_a._invI
        inv_Ib = self._inv_Ib = body_b._invI

        ca, aa = positions[index_a]
        cb, ab = positions[index_b]
        va, wa = velocities[index_a]
        vb, wb = velocities[index_b]

        qa = Mat22(angle=aa)
        qb = Mat22(angle=ab)

        # World-frame anchor offsets from each body's center of mass.
        # (This was previously computed twice; the duplicate was removed.)
        ra = self._ra = qa * (self._local_anchor_a - local_center_a)
        rb = self._rb = qb * (self._local_anchor_b - local_center_b)

        # J = [-I -ra_skew I rb_skew]
        #     [ 0       -1 0       1]
        # r_skew = [-ry; rx]

        # Matlab
        # K = [ mA+ray^2*iA+mB+rby^2*iB,  -ray*iA*rax-rby*iB*rbx,          -ray*iA-rby*iB]
        #     [  -ray*iA*rax-rby*iB*rbx, mA+rax^2*iA+mB+rbx^2*iB,           rax*iA+rbx*iB]
        #     [          -ray*iA-rby*iB,           rax*iA+rbx*iB,                   iA+iB]
        K = Mat22()
        K.col1 = Vec2(inv_mass_a + inv_mass_b + inv_Ia * ra.y * ra.y + inv_Ib * rb.y * rb.y,
                      -inv_Ia * ra.x * ra.y - inv_Ib * rb.x * rb.y)
        K.col2 = Vec2(K.col1.y,
                      inv_mass_a + inv_mass_b + inv_Ia * ra.x * ra.x + inv_Ib * rb.x * rb.x)

        self._linear_mass = K.inverse
        self._angular_mass = inv_Ia + inv_Ib
        if self._angular_mass > 0.0:
            self._angular_mass = 1.0 / self._angular_mass

        if step.warm_starting:
            # Scale impulses to support a variable time step.
            self._linear_impulse *= step.dt_ratio
            self._angular_impulse *= step.dt_ratio

            P = self._linear_impulse
            va -= inv_mass_a * P
            wa -= inv_Ia * (ra.cross(P) + self._angular_impulse)
            vb += inv_mass_b * P
            wb += inv_Ib * (rb.cross(P) + self._angular_impulse)
        else:
            self._linear_impulse = Vec2()
            self._angular_impulse = 0.0

        velocities[index_a] = (va, wa)
        velocities[index_b] = (vb, wb)

    def _solve_velocity_constraints(self, step, positions, velocities):
        """Apply clamped angular and linear friction impulses to the bodies."""
        dt = step.dt
        index_a, index_b = self._indices
        va, wa = velocities[index_a]
        vb, wb = velocities[index_b]

        ra, rb = self._ra, self._rb
        inv_mass_a, inv_mass_b = self._inv_mass_a, self._inv_mass_b
        inv_Ia, inv_Ib = self._inv_Ia, self._inv_Ib

        # Solve angular friction
        Cdot = wb - wa
        impulse = -self._angular_mass * Cdot

        old_impulse = self._angular_impulse
        max_impulse = dt * self._max_torque
        self._angular_impulse = clamp(self._angular_impulse + impulse, -max_impulse, max_impulse)
        impulse = self._angular_impulse - old_impulse

        wa -= inv_Ia * impulse
        wb += inv_Ib * impulse

        # Solve linear friction
        Cdot = vb + scalar_cross(wb, rb) - va - scalar_cross(wa, ra)

        impulse = -(self._linear_mass * Cdot)
        old_impulse = Vec2(*self._linear_impulse)
        self._linear_impulse += impulse

        # Clamp the accumulated impulse to the friction cone.
        max_impulse = dt * self._max_force
        if self._linear_impulse.length_squared > max_impulse ** 2:
            self._linear_impulse = self._linear_impulse.normalized * max_impulse

        impulse = self._linear_impulse - old_impulse

        va -= inv_mass_a * impulse
        wa -= inv_Ia * ra.cross(impulse)
        vb += inv_mass_b * impulse
        wb += inv_Ib * rb.cross(impulse)

        # Bug fix: write the updated *velocities* back. The original wrote
        # the (unmodified) positions back instead, so the friction impulses
        # computed above were silently discarded.
        velocities[index_a] = (va, wa)
        velocities[index_b] = (vb, wb)

    def _solve_position_constraints(self, step, positions, velocities):
        # Friction joints apply no position correction.
        return True
| 36.717949 | 98 | 0.606378 |
ef0fd4c0e8de32df428c1523af251be68bdad019 | 1,280 | py | Python | acertmgr/authority/__init__.py | davidklaftenegger/acertmgr | 93e28437ffe8ccc8f96335436ca814fed3cff8de | [
"0BSD"
] | 21 | 2016-04-03T13:26:14.000Z | 2021-09-23T16:41:22.000Z | acertmgr/authority/__init__.py | davidklaftenegger/acertmgr | 93e28437ffe8ccc8f96335436ca814fed3cff8de | [
"0BSD"
] | 24 | 2016-04-10T09:37:48.000Z | 2021-11-23T19:10:49.000Z | acertmgr/authority/__init__.py | davidklaftenegger/acertmgr | 93e28437ffe8ccc8f96335436ca814fed3cff8de | [
"0BSD"
] | 12 | 2016-01-10T14:27:19.000Z | 2022-01-02T16:20:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# authority - authority api package
# Copyright (c) Rudolf Mayerhofer, 2019.
# available under the ISC license, see LICENSE
import importlib
import json
import os
from acertmgr import tools
from acertmgr.tools import log
authorities = dict()
# @brief find or create a suitable authority for the given settings
# @param settings the authority configuration options
def authority(settings):
key = json.dumps(settings, sort_keys=True)
if key in authorities:
return authorities[key]
else:
acc_file = settings['account_key']
if os.path.isfile(acc_file):
log("Reading account key from {}".format(acc_file))
acc_key = tools.read_pem_file(acc_file, key=True)
else:
log("Account key not found at '{0}'. Creating key.".format(acc_file))
acc_key = tools.new_account_key(acc_file, settings['account_key_algorithm'], settings['account_key_length'])
authority_module = importlib.import_module("acertmgr.authority.{0}".format(settings["api"]))
authority_class = getattr(authority_module, "ACMEAuthority")
authority_obj = authority_class(settings, acc_key)
authorities[key] = authority_obj
return authority_obj
| 33.684211 | 120 | 0.698438 |
5fafc838a3c49dddd42c49207637d42ef431fdb1 | 2,841 | py | Python | ml/dish_detector/video_demo.py | jphacks/TK_1810 | 0c47c2a0eaeea47067e066a43def7ed079f77d2c | [
"MIT"
] | 6 | 2018-11-05T15:30:37.000Z | 2020-10-27T09:16:03.000Z | ml/dish_detector/video_demo.py | jphacks/TK_1810 | 0c47c2a0eaeea47067e066a43def7ed079f77d2c | [
"MIT"
] | 1 | 2018-10-28T02:02:25.000Z | 2018-10-28T02:02:25.000Z | ml/dish_detector/video_demo.py | jphacks/TK_1810 | 0c47c2a0eaeea47067e066a43def7ed079f77d2c | [
"MIT"
] | 3 | 2019-10-18T10:21:54.000Z | 2021-10-30T00:54:39.000Z | import argparse
import uuid
import shutil
from pathlib import Path
import subprocess as sp
from tqdm import tqdm
import torch
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
from models import Darknet
parser = argparse.ArgumentParser()
parser.add_argument("input", type=Path)
parser.add_argument("output", type=Path)
parser.add_argument("--config", type=Path, default=Path("config/mymodel.cfg"))
parser.add_argument("--weight", type=Path, default=Path("result/normal_finetuning_aug_full_strong/35.pkl"))
parser.add_argument("--conf_thresh", type=float, default=0.7)
parser.add_argument("--nms_thresh", type=float, default=0.4)
args = parser.parse_args()
def read_video(path):
cap = cv2.VideoCapture(str(path))
info = {
'width': int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
'height': int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
'fps': int(cap.get(cv2.CAP_PROP_FPS))
}
video = []
while(True):
ret, img = cap.read()
if ret == True:
video.append(img)
else:
break
cap.release()
return np.asarray(video), info
def write_video(path, video, info):
frame, height, width, ch = video.shape
fourcc = cv2.VideoWriter_fourcc(*'XVID')
writer = cv2.VideoWriter(str(path), fourcc, info['fps'], (width, height))
for i in range(frame):
writer.write(video[i])
writer.release()
def main():
# model
model = Darknet(str(args.config), img_size=416)
model_wts = torch.load(str(args.weight), map_location='cpu')
model.load_state_dict(model_wts)
if torch.cuda.is_available():
print('gpu: available')
model = model.cuda()
else:
print('gpu: not available')
# read video
print(">> reading video...")
video, info = read_video(args.input)
video = video[:,:,:,::-1]
print(" shape:", video.shape)
print(" info: ", info)
# forward
print(">> predicting...")
imgs, bboxes, ss, labels = [], [], [], []
for i in tqdm(range(0, len(video))):
img = video[i]
bbox, max_s = model.predict(img, args.conf_thresh, args.nms_thresh)
imgs.append(img)
ss.append(max_s)
if len(bbox) != 0:
bboxes.append([bbox])
labels.append([0])
else:
bboxes.append([])
labels.append([])
# draw bbox
imgs = np.asarray(imgs)
# imgs = imgs[:,:,::-1]
for i in tqdm(range(len(imgs))):
if len(bboxes[i]) != 0:
ty, tx, by, bx = [int(n) for n in bboxes[i][0]]
cv2.rectangle(imgs[i], (tx, ty), (bx, by), (255, 0, 0), 3)
# save as video
print(">> saving video...")
imgs = imgs[:,:,:,::-1]
write_video(args.output, imgs, info)
if __name__ == "__main__":
main()
| 27.582524 | 107 | 0.596621 |
f805ec8ba1a3c1156033b691864d1a31bc60000b | 47,245 | py | Python | venv/Lib/site-packages/pandas/io/excel/_base.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/io/excel/_base.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/io/excel/_base.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | from __future__ import annotations
import abc
import datetime
from io import BytesIO
import os
from textwrap import fill
from typing import (
Any,
Mapping,
cast,
)
import warnings
import zipfile
from pandas._config import config
from pandas._libs.parsers import STR_NA_VALUES
from pandas._typing import (
Buffer,
DtypeArg,
FilePathOrBuffer,
StorageOptions,
)
from pandas.compat._optional import (
get_version,
import_optional_dependency,
)
from pandas.errors import EmptyDataError
from pandas.util._decorators import (
Appender,
deprecate_nonkeyword_arguments,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_bool,
is_float,
is_integer,
is_list_like,
)
from pandas.core.frame import DataFrame
from pandas.core.shared_docs import _shared_docs
from pandas.util.version import Version
from pandas.io.common import (
IOHandles,
get_handle,
stringify_path,
validate_header_arg,
)
from pandas.io.excel._util import (
fill_mi_header,
get_default_engine,
get_writer,
maybe_convert_usecols,
pop_header_name,
)
from pandas.io.parsers import TextParser
_read_excel_doc = (
"""
Read an Excel file into a pandas DataFrame.
Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions
read from a local filesystem or URL. Supports an option to read
a single sheet or a list of sheets.
Parameters
----------
io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable default None
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
Returns a subset of the columns according to behavior above.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Supported engines: "xlrd", "openpyxl", "odf", "pyxlsb".
Engine compatibility :
- "xlrd" supports old-style Excel files (.xls).
- "openpyxl" supports newer Excel file formats.
- "odf" supports OpenDocument file formats (.odf, .ods, .odt).
- "pyxlsb" supports Binary Excel files.
.. versionchanged:: 1.2.0
The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
now only supports old-style ``.xls`` files.
When ``engine=None``, the following logic will be
used to determine the engine:
- If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
then `odf <https://pypi.org/project/odfpy/>`_ will be used.
- Otherwise if ``path_or_buffer`` is an xls format,
``xlrd`` will be used.
- Otherwise if ``path_or_buffer`` is in xlsb format,
``pyxlsb`` will be used.
.. versionadded:: 1.3.0
- Otherwise ``openpyxl`` will be used.
.. versionchanged:: 1.3.0
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like, int, or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int) at the
start of the file. If callable, the callable function will be evaluated
against the row indices, returning True if the row should be skipped and
False otherwise. An example of a valid callable argument would be ``lambda
x: x in [0, 2]``.
nrows : int, default None
Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparsable date, the entire column or
index will be returned unaltered as an object data type. If you don`t want to
parse some cells as date just change their type in Excel to "Text".
For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
.. deprecated:: 1.3.0
convert_float will be removed in a future version
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> pd.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 NaN 1
1 NaN 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
)
@deprecate_nonkeyword_arguments(allowed_args=["io", "sheet_name"], version="2.0")
@Appender(_read_excel_doc)
def read_excel(
io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype: DtypeArg | None = None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=None,
mangle_dupe_cols=True,
storage_options: StorageOptions = None,
):
should_close = False
if not isinstance(io, ExcelFile):
should_close = True
io = ExcelFile(io, storage_options=storage_options, engine=engine)
elif engine and engine != io.engine:
raise ValueError(
"Engine should not be specified when passing "
"an ExcelFile - ExcelFile already has the engine set"
)
try:
data = io.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
na_filter=na_filter,
verbose=verbose,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
)
finally:
# make sure to close opened file handles
if should_close:
io.close()
return data
class BaseExcelReader(metaclass=abc.ABCMeta):
def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
self.handles = IOHandles(
handle=filepath_or_buffer, compression={"method": None}
)
if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
self.handles = get_handle(
filepath_or_buffer, "rb", storage_options=storage_options, is_text=False
)
if isinstance(self.handles.handle, self._workbook_class):
self.book = self.handles.handle
elif hasattr(self.handles.handle, "read"):
# N.B. xlrd.Book has a read attribute too
self.handles.handle.seek(0)
try:
self.book = self.load_workbook(self.handles.handle)
except Exception:
self.close()
raise
elif isinstance(self.handles.handle, bytes):
self.book = self.load_workbook(BytesIO(self.handles.handle))
else:
raise ValueError(
"Must explicitly set engine if not passing in buffer or path for io."
)
@property
@abc.abstractmethod
def _workbook_class(self):
pass
@abc.abstractmethod
def load_workbook(self, filepath_or_buffer):
pass
def close(self):
if hasattr(self, "book") and hasattr(self.book, "close"):
# pyxlsb: opens a TemporaryFile
# openpyxl: https://stackoverflow.com/questions/31416842/
# openpyxl-does-not-close-excel-workbook-in-read-only-mode
self.book.close()
self.handles.close()
@property
@abc.abstractmethod
def sheet_names(self):
pass
@abc.abstractmethod
def get_sheet_by_name(self, name):
pass
@abc.abstractmethod
def get_sheet_by_index(self, index):
pass
@abc.abstractmethod
def get_sheet_data(self, sheet, convert_float):
pass
def raise_if_bad_sheet_by_index(self, index: int) -> None:
n_sheets = len(self.sheet_names)
if index >= n_sheets:
raise ValueError(
f"Worksheet index {index} is invalid, {n_sheets} worksheets found"
)
def raise_if_bad_sheet_by_name(self, name: str) -> None:
if name not in self.sheet_names:
raise ValueError(f"Worksheet named '{name}' not found")
def parse(
self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype: DtypeArg | None = None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=None,
mangle_dupe_cols=True,
**kwds,
):
if convert_float is None:
convert_float = True
else:
stacklevel = find_stack_level()
warnings.warn(
"convert_float is deprecated and will be removed in a future version",
FutureWarning,
stacklevel=stacklevel,
)
validate_header_arg(header)
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheet_name, list):
sheets = sheet_name
ret_dict = True
elif sheet_name is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheet_name]
# handle same-type duplicates.
sheets = list(dict.fromkeys(sheets).keys())
output = {}
for asheetname in sheets:
if verbose:
print(f"Reading sheet {asheetname}")
if isinstance(asheetname, str):
sheet = self.get_sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.get_sheet_by_index(asheetname)
data = self.get_sheet_data(sheet, convert_float)
if hasattr(sheet, "close"):
# pyxlsb opens two TemporaryFiles
sheet.close()
usecols = maybe_convert_usecols(usecols)
if not data:
output[asheetname] = DataFrame()
continue
if is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None and is_list_like(header):
header_names = []
control_row = [True] * len(data[0])
for row in header:
if is_integer(skiprows):
row += skiprows
data[row], control_row = fill_mi_header(data[row], control_row)
if index_col is not None:
header_name, _ = pop_header_name(data[row], index_col)
header_names.append(header_name)
# If there is a MultiIndex header and an index then there is also
# a row containing just the index name(s)
has_index_names = (
is_list_like(header) and len(header) > 1 and index_col is not None
)
if is_list_like(index_col):
# Forward fill values for MultiIndex index.
if header is None:
offset = 0
elif not is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
# GH34673: if MultiIndex names present and not defined in the header,
# offset needs to be incremented so that forward filling starts
# from the first MI value instead of the name
if has_index_names:
offset += 1
# Check if we have an empty dataset
# before trying to collect data.
if offset < len(data):
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == "" or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
# GH 12292 : error when read one empty column from excel file
try:
parser = TextParser(
data,
names=names,
header=header,
index_col=index_col,
has_index_names=has_index_names,
squeeze=squeeze,
dtype=dtype,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
skip_blank_lines=False, # GH 39808
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
usecols=usecols,
mangle_dupe_cols=mangle_dupe_cols,
**kwds,
)
output[asheetname] = parser.read(nrows=nrows)
if not squeeze or isinstance(output[asheetname], DataFrame):
if header_names:
output[asheetname].columns = output[
asheetname
].columns.set_names(header_names)
except EmptyDataError:
# No Data, return an empty DataFrame
output[asheetname] = DataFrame()
if ret_dict:
return output
else:
return output[asheetname]
class ExcelWriter(metaclass=abc.ABCMeta):
"""
Class for writing DataFrame objects into excel sheets.
Default is to use xlwt for xls, openpyxl for xlsx, odf for ods.
See DataFrame.to_excel for typical usage.
The writer should be used as a context manager. Otherwise, call `close()` to save
and close any opened file handles.
Parameters
----------
path : str or typing.BinaryIO
Path to xls or xlsx or ods file.
engine : str (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
.. deprecated:: 1.2.0
As the `xlwt <https://pypi.org/project/xlwt/>`__ package is no longer
maintained, the ``xlwt`` engine will be removed in a future
version of pandas.
date_format : str, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
datetime_format : str, default None
Format string for datetime objects written into Excel files.
(e.g. 'YYYY-MM-DD HH:MM:SS').
mode : {'w', 'a'}, default 'w'
File mode to use (write or append). Append does not work with fsspec URLs.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://".
.. versionadded:: 1.2.0
if_sheet_exists : {'error', 'new', 'replace'}, default 'error'
How to behave when trying to write to a sheet that already
exists (append mode only).
* error: raise a ValueError.
* new: Create a new sheet, with a name determined by the engine.
* replace: Delete the contents of the sheet before writing to it.
.. versionadded:: 1.3.0
engine_kwargs : dict, optional
Keyword arguments to be passed into the engine.
.. versionadded:: 1.3.0
**kwargs : dict, optional
Keyword arguments to be passed into the engine.
.. deprecated:: 1.3.0
Use engine_kwargs instead.
Attributes
----------
None
Methods
-------
None
Notes
-----
None of the methods and properties are considered public.
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
Examples
--------
Default usage:
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
>>> with ExcelWriter("path_to_file.xlsx") as writer:
... df.to_excel(writer)
To write to separate sheets in a single file:
>>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"])
>>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
>>> with ExcelWriter("path_to_file.xlsx") as writer:
... df1.to_excel(writer, sheet_name="Sheet1")
... df2.to_excel(writer, sheet_name="Sheet2")
You can set the date format or datetime format:
>>> from datetime import date, datetime
>>> df = pd.DataFrame(
... [
... [date(2014, 1, 31), date(1999, 9, 24)],
... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
... ],
... index=["Date", "Datetime"],
... columns=["X", "Y"],
... )
>>> with ExcelWriter(
... "path_to_file.xlsx",
... date_format="YYYY-MM-DD",
... datetime_format="YYYY-MM-DD HH:MM:SS"
... ) as writer:
... df.to_excel(writer)
You can also append to an existing Excel file:
>>> with ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
... df.to_excel(writer, sheet_name="Sheet3")
You can store Excel file in RAM:
>>> import io
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
>>> buffer = io.BytesIO()
>>> with pd.ExcelWriter(buffer) as writer:
... df.to_excel(writer)
You can pack Excel file into zip archive:
>>> import zipfile
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
>>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
... with zf.open("filename.xlsx", "w") as buffer:
... with pd.ExcelWriter(buffer) as writer:
... df.to_excel(writer)
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
def __new__(
cls,
path: FilePathOrBuffer | ExcelWriter,
engine=None,
date_format=None,
datetime_format=None,
mode: str = "w",
storage_options: StorageOptions = None,
if_sheet_exists: str | None = None,
engine_kwargs: dict | None = None,
**kwargs,
):
if kwargs:
if engine_kwargs is not None:
raise ValueError("Cannot use both engine_kwargs and **kwargs")
warnings.warn(
"Use of **kwargs is deprecated, use engine_kwargs instead.",
FutureWarning,
stacklevel=2,
)
# only switch class if generic(ExcelWriter)
if cls is ExcelWriter:
if engine is None or (isinstance(engine, str) and engine == "auto"):
if isinstance(path, str):
ext = os.path.splitext(path)[-1][1:]
else:
ext = "xlsx"
try:
engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
if engine == "auto":
engine = get_default_engine(ext, mode="writer")
except KeyError as err:
raise ValueError(f"No engine for filetype: '{ext}'") from err
if engine == "xlwt":
xls_config_engine = config.get_option(
"io.excel.xls.writer", silent=True
)
# Don't warn a 2nd time if user has changed the default engine for xls
if xls_config_engine != "xlwt":
warnings.warn(
"As the xlwt package is no longer maintained, the xlwt "
"engine will be removed in a future version of pandas. "
"This is the only engine in pandas that supports writing "
"in the xls format. Install openpyxl and write to an xlsx "
"file instead. You can set the option io.excel.xls.writer "
"to 'xlwt' to silence this warning. While this option is "
"deprecated and will also raise a warning, it can "
"be globally set and the warning suppressed.",
FutureWarning,
stacklevel=4,
)
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
path = None
@property
@abc.abstractmethod
def supported_extensions(self):
"""Extensions that writer engine supports."""
pass
@property
@abc.abstractmethod
def engine(self):
"""Name of engine."""
pass
@abc.abstractmethod
def write_cells(
self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
):
"""
Write given formatted cells into Excel an excel sheet
Parameters
----------
cells : generator
cell of formatted data to save to Excel sheet
sheet_name : str, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
freeze_panes: int tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(
self,
path: FilePathOrBuffer | ExcelWriter,
engine=None,
date_format=None,
datetime_format=None,
mode: str = "w",
storage_options: StorageOptions = None,
if_sheet_exists: str | None = None,
engine_kwargs: dict | None = None,
**kwargs,
):
# validate that this engine can handle the extension
if isinstance(path, str):
ext = os.path.splitext(path)[-1]
self.check_extension(ext)
# use mode to open the file
if "b" not in mode:
mode += "b"
# use "a" for the user to append data to excel but internally use "r+" to let
# the excel backend first read the existing file and then write any data to it
mode = mode.replace("a", "r+")
# cast ExcelWriter to avoid adding 'if self.handles is not None'
self.handles = IOHandles(cast(Buffer, path), compression={"copression": None})
if not isinstance(path, ExcelWriter):
self.handles = get_handle(
path, mode, storage_options=storage_options, is_text=False
)
self.sheets: dict[str, Any] = {}
self.cur_sheet = None
if date_format is None:
self.date_format = "YYYY-MM-DD"
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = "YYYY-MM-DD HH:MM:SS"
else:
self.datetime_format = datetime_format
self.mode = mode
if if_sheet_exists not in [None, "error", "new", "replace"]:
raise ValueError(
f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
"Valid options are 'error', 'new' and 'replace'."
)
if if_sheet_exists and "r+" not in mode:
raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
if if_sheet_exists is None:
if_sheet_exists = "error"
self.if_sheet_exists = if_sheet_exists
def __fspath__(self):
    # os.PathLike support: report the underlying handle's file name, or ""
    # for in-memory buffers that carry no name attribute.
    return getattr(self.handles.handle, "name", "")
def _get_sheet_name(self, sheet_name):
    """Resolve the target sheet, falling back to ``self.cur_sheet``."""
    resolved = self.cur_sheet if sheet_name is None else sheet_name
    if resolved is None:  # pragma: no cover
        raise ValueError("Must pass explicit sheet_name or set cur_sheet property")
    return resolved
def _value_with_fmt(self, val):
    """
    Convert numpy types to Python types for the Excel writers.

    Parameters
    ----------
    val : object
        Value to be written into cells

    Returns
    -------
    Tuple of (converted value, optional number format).
    """
    if is_integer(val):
        return int(val), None
    if is_float(val):
        return float(val), None
    if is_bool(val):
        return bool(val), None
    # datetime must be checked before date: datetime is a date subclass.
    if isinstance(val, datetime.datetime):
        return val, self.datetime_format
    if isinstance(val, datetime.date):
        return val, self.date_format
    if isinstance(val, datetime.timedelta):
        # Excel represents durations as fractional days.
        return val.total_seconds() / 86400, "0"
    return str(val), None
@classmethod
def check_extension(cls, ext: str):
    """
    Check ``ext`` against the writer's supported extensions.

    Raises
    ------
    ValueError
        If the extension is not supported by this engine. (The previous
        docstring named a nonexistent ``UnsupportedFiletypeError``; the
        code has always raised ``ValueError``.)

    Returns
    -------
    True when the extension is supported.
    """
    if ext.startswith("."):
        ext = ext[1:]
    # error: "Callable[[ExcelWriter], Any]" has no attribute "__iter__" (not
    # iterable)
    if not any(
        ext in extension
        for extension in cls.supported_extensions  # type: ignore[attr-defined]
    ):
        raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
    return True
# Allow use as a contextmanager
def __enter__(self):
    return self

def __exit__(self, exc_type, exc_value, traceback):
    # Saving happens via close(); exceptions are not suppressed.
    self.close()
def close(self):
    """File-like alias for ``save``: persist the workbook, then release handles."""
    result = self.save()
    self.handles.close()
    return result
# Magic-byte signatures used to sniff legacy Excel (.xls) streams.
XLS_SIGNATURES = (
    b"\x09\x00\x04\x00\x07\x00\x10\x00",  # BIFF2
    b"\x09\x02\x06\x00\x00\x00\x10\x00",  # BIFF3
    b"\x09\x04\x06\x00\x00\x00\x10\x00",  # BIFF4
    b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1",  # Compound File Binary
)
# Zip local-file-header magic: xlsx/xlsb/ods are all zip containers.
ZIP_SIGNATURE = b"PK\x03\x04"
# Longest signature length — number of bytes we must peek from the stream.
PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))
@doc(storage_options=_shared_docs["storage_options"])
def inspect_excel_format(
    content_or_path: FilePathOrBuffer,
    storage_options: StorageOptions = None,
) -> str | None:
    """
    Inspect the path or content of an excel file and get its format.

    Adopted from xlrd: https://github.com/python-excel/xlrd.

    Parameters
    ----------
    content_or_path : str or file-like object
        Path to file or content of file to inspect. May be a URL.
    {storage_options}

    Returns
    -------
    str or None
        Format of file if it can be determined.

    Raises
    ------
    ValueError
        If resulting stream is empty.
    BadZipFile
        If resulting stream does not have an XLS signature and is not a valid
        zipfile.
    """
    if isinstance(content_or_path, bytes):
        content_or_path = BytesIO(content_or_path)

    with get_handle(
        content_or_path, "rb", storage_options=storage_options, is_text=False
    ) as handle:
        stream = handle.handle
        stream.seek(0)
        buf = stream.read(PEEK_SIZE)
        if buf is None:
            raise ValueError("stream is empty")
        assert isinstance(buf, bytes)
        peek = buf
        stream.seek(0)

        if any(peek.startswith(sig) for sig in XLS_SIGNATURES):
            return "xls"
        if not peek.startswith(ZIP_SIGNATURE):
            return None

        # ZipFile typing is overly-strict
        # https://github.com/python/typeshed/issues/4212
        zf = zipfile.ZipFile(stream)  # type: ignore[arg-type]

        # Some third-party producers use backslashes and upper-case member
        # names; normalise before probing for the telltale components.
        component_names = {
            name.replace("\\", "/").lower() for name in zf.namelist()
        }

        if "xl/workbook.xml" in component_names:
            return "xlsx"
        if "xl/workbook.bin" in component_names:
            return "xlsb"
        if "content.xml" in component_names:
            return "ods"
        return "zip"
class ExcelFile:
    """
    Class for parsing tabular excel sheets into DataFrame objects.

    See read_excel for more documentation.

    Parameters
    ----------
    path_or_buffer : str, path object (pathlib.Path or py._path.local.LocalPath),
        a file-like object, xlrd workbook or openpyxl workbook.
        If a string or path object, expected to be a path to a
        .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
    engine : str, default None
        If io is not a buffer or path, this must be set to identify io.
        Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``
        Engine compatibility :

        - ``xlrd`` supports old-style Excel files (.xls).
        - ``openpyxl`` supports newer Excel file formats.
        - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
        - ``pyxlsb`` supports Binary Excel files.

        .. versionchanged:: 1.2.0

           The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
           now only supports old-style ``.xls`` files.
           When ``engine=None``, the following logic will be
           used to determine the engine:

           - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
             then `odf <https://pypi.org/project/odfpy/>`_ will be used.
           - Otherwise if ``path_or_buffer`` is an xls format,
             ``xlrd`` will be used.
           - Otherwise if ``path_or_buffer`` is in xlsb format,
             `pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.

           .. versionadded:: 1.3.0

           - Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is
             installed, then ``openpyxl`` will be used.
           - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will
             be raised.
           - Otherwise ``xlrd`` will be used and a ``FutureWarning`` will be
             raised. This case will raise a ``ValueError`` in a future version
             of pandas.

        .. warning::

           Please do not report issues when using ``xlrd`` to read ``.xlsx``
           files. This is not supported, switch to using ``openpyxl`` instead.
    """

    from pandas.io.excel._odfreader import ODFReader
    from pandas.io.excel._openpyxl import OpenpyxlReader
    from pandas.io.excel._pyxlsb import PyxlsbReader
    from pandas.io.excel._xlrd import XlrdReader

    # Registry mapping user-facing engine names to reader classes.
    _engines: Mapping[str, Any] = {
        "xlrd": XlrdReader,
        "openpyxl": OpenpyxlReader,
        "odf": ODFReader,
        "pyxlsb": PyxlsbReader,
    }

    def __init__(
        self, path_or_buffer, engine=None, storage_options: StorageOptions = None
    ):
        if engine is not None and engine not in self._engines:
            raise ValueError(f"Unknown engine: {engine}")

        # Could be a str, ExcelFile, Book, etc.
        self.io = path_or_buffer
        # Always a string
        self._io = stringify_path(path_or_buffer)

        # Determine xlrd version if installed
        if import_optional_dependency("xlrd", errors="ignore") is None:
            xlrd_version = None
        else:
            import xlrd

            xlrd_version = Version(get_version(xlrd))

        ext = None
        if engine is None:
            # Only determine ext if it is needed
            if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
                ext = "xls"
            else:
                ext = inspect_excel_format(
                    content_or_path=path_or_buffer, storage_options=storage_options
                )
                if ext is None:
                    raise ValueError(
                        "Excel file format cannot be determined, you must specify "
                        "an engine manually."
                    )

            # Resolve the configured default reader for this extension.
            engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
            if engine == "auto":
                engine = get_default_engine(ext, mode="reader")

        if engine == "xlrd" and xlrd_version is not None:
            if ext is None:
                # Need ext to determine ext in order to raise/warn
                if isinstance(path_or_buffer, xlrd.Book):
                    ext = "xls"
                else:
                    ext = inspect_excel_format(
                        path_or_buffer, storage_options=storage_options
                    )

            # Pass through if ext is None, otherwise check if ext valid for xlrd
            if ext and ext != "xls" and xlrd_version >= Version("2"):
                raise ValueError(
                    f"Your version of xlrd is {xlrd_version}. In xlrd >= 2.0, "
                    f"only the xls format is supported. Install openpyxl instead."
                )
            elif ext and ext != "xls":
                # Deprecation path: xlrd < 2.0 can still read non-xls, but this
                # will become an error in a future pandas version.
                stacklevel = find_stack_level()
                warnings.warn(
                    f"Your version of xlrd is {xlrd_version}. In xlrd >= 2.0, "
                    f"only the xls format is supported. Install "
                    f"openpyxl instead.",
                    FutureWarning,
                    stacklevel=stacklevel,
                )

        self.engine = engine
        self.storage_options = storage_options

        self._reader = self._engines[engine](self._io, storage_options=storage_options)

    def __fspath__(self):
        # os.PathLike support: always the stringified path.
        return self._io

    def parse(
        self,
        sheet_name=0,
        header=0,
        names=None,
        index_col=None,
        usecols=None,
        squeeze=False,
        converters=None,
        true_values=None,
        false_values=None,
        skiprows=None,
        nrows=None,
        na_values=None,
        parse_dates=False,
        date_parser=None,
        thousands=None,
        comment=None,
        skipfooter=0,
        convert_float=None,
        mangle_dupe_cols=True,
        **kwds,
    ):
        """
        Parse specified sheet(s) into a DataFrame.

        Equivalent to read_excel(ExcelFile, ...)  See the read_excel
        docstring for more info on accepted parameters.

        Returns
        -------
        DataFrame or dict of DataFrames
            DataFrame from the passed in Excel file.
        """
        # Delegate entirely to the engine-specific reader.
        return self._reader.parse(
            sheet_name=sheet_name,
            header=header,
            names=names,
            index_col=index_col,
            usecols=usecols,
            squeeze=squeeze,
            converters=converters,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            nrows=nrows,
            na_values=na_values,
            parse_dates=parse_dates,
            date_parser=date_parser,
            thousands=thousands,
            comment=comment,
            skipfooter=skipfooter,
            convert_float=convert_float,
            mangle_dupe_cols=mangle_dupe_cols,
            **kwds,
        )

    @property
    def book(self):
        # Underlying engine workbook object (engine-specific type).
        return self._reader.book

    @property
    def sheet_names(self):
        return self._reader.sheet_names

    def close(self):
        """close io if necessary"""
        self._reader.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def __del__(self):
        # Ensure we don't leak file descriptors, but put in try/except in case
        # attributes are already deleted
        try:
            self.close()
        except AttributeError:
            pass
| 35.791667 | 89 | 0.582728 |
84dd85bed8c602d640e1489ae4faffa5ae5fad3c | 5,456 | py | Python | sdk/python/pulumi_azure_native/authorization/get_role_management_policy_assignment.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/authorization/get_role_management_policy_assignment.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/authorization/get_role_management_policy_assignment.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetRoleManagementPolicyAssignmentResult',
'AwaitableGetRoleManagementPolicyAssignmentResult',
'get_role_management_policy_assignment',
]
@pulumi.output_type
class GetRoleManagementPolicyAssignmentResult:
    """
    Role management policy

    Generated output type: each constructor argument is validated and stored
    via ``pulumi.set`` so ``@pulumi.output_type`` can expose it through the
    matching ``@property`` getter below.
    """
    def __init__(__self__, id=None, name=None, policy_assignment_properties=None, policy_id=None, role_definition_id=None, scope=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if policy_assignment_properties and not isinstance(policy_assignment_properties, dict):
            raise TypeError("Expected argument 'policy_assignment_properties' to be a dict")
        pulumi.set(__self__, "policy_assignment_properties", policy_assignment_properties)
        if policy_id and not isinstance(policy_id, str):
            raise TypeError("Expected argument 'policy_id' to be a str")
        pulumi.set(__self__, "policy_id", policy_id)
        if role_definition_id and not isinstance(role_definition_id, str):
            raise TypeError("Expected argument 'role_definition_id' to be a str")
        pulumi.set(__self__, "role_definition_id", role_definition_id)
        if scope and not isinstance(scope, str):
            raise TypeError("Expected argument 'scope' to be a str")
        pulumi.set(__self__, "scope", scope)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The role management policy Id.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The role management policy name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="policyAssignmentProperties")
    def policy_assignment_properties(self) -> 'outputs.PolicyAssignmentPropertiesResponse':
        """
        Additional properties of scope, role definition and policy
        """
        return pulumi.get(self, "policy_assignment_properties")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """
        The policy id role management policy assignment.
        """
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="roleDefinitionId")
    def role_definition_id(self) -> Optional[str]:
        """
        The role definition of management policy assignment.
        """
        return pulumi.get(self, "role_definition_id")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        """
        The role management policy scope.
        """
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The role management policy type.
        """
        return pulumi.get(self, "type")
class AwaitableGetRoleManagementPolicyAssignmentResult(GetRoleManagementPolicyAssignmentResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Make the already-resolved result awaitable for async callers: the
        # unreachable `if False: yield` turns this method into a generator
        # without ever yielding, so awaiting returns the result immediately.
        if False:
            yield self
        return GetRoleManagementPolicyAssignmentResult(
            id=self.id,
            name=self.name,
            policy_assignment_properties=self.policy_assignment_properties,
            policy_id=self.policy_id,
            role_definition_id=self.role_definition_id,
            scope=self.scope,
            type=self.type)
def get_role_management_policy_assignment(role_management_policy_assignment_name: Optional[str] = None,
                                          scope: Optional[str] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRoleManagementPolicyAssignmentResult:
    """
    Role management policy
    API Version: 2020-10-01-preview.


    :param str role_management_policy_assignment_name: The name of format {guid_guid} the role management policy assignment to get.
    :param str scope: The scope of the role management policy.
    """
    # Wire-format argument names expected by the Azure Native provider.
    invoke_args = {
        'roleManagementPolicyAssignmentName': role_management_policy_assignment_name,
        'scope': scope,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('azure-native:authorization:getRoleManagementPolicyAssignment', invoke_args, opts=opts, typ=GetRoleManagementPolicyAssignmentResult).value
    return AwaitableGetRoleManagementPolicyAssignmentResult(
        id=result.id,
        name=result.name,
        policy_assignment_properties=result.policy_assignment_properties,
        policy_id=result.policy_id,
        role_definition_id=result.role_definition_id,
        scope=result.scope,
        type=result.type)
| 37.369863 | 171 | 0.671188 |
3de13e359a486c71d3ae18dcb499e4667b6831e3 | 854 | py | Python | niapy/tests/test_gsa.py | chinmay3/NiaPy | b4e5c0f98063e2a9eebd8d750f0922cfca88bc55 | [
"MIT"
] | null | null | null | niapy/tests/test_gsa.py | chinmay3/NiaPy | b4e5c0f98063e2a9eebd8d750f0922cfca88bc55 | [
"MIT"
] | 1 | 2021-08-13T07:52:40.000Z | 2021-08-16T08:52:20.000Z | niapy/tests/test_gsa.py | chinmay3/NiaPy | b4e5c0f98063e2a9eebd8d750f0922cfca88bc55 | [
"MIT"
] | 2 | 2021-08-08T08:29:53.000Z | 2021-08-12T15:31:55.000Z | # encoding=utf8
from niapy.algorithms.basic import GravitationalSearchAlgorithm
from niapy.tests.test_algorithm import AlgorithmTestCase, MyProblem
class GSATestCase(AlgorithmTestCase):
    """Smoke tests for GravitationalSearchAlgorithm on a custom problem and Griewank."""

    def setUp(self):
        AlgorithmTestCase.setUp(self)
        self.algo = GravitationalSearchAlgorithm

    def test_Custom(self):
        # Two identically-seeded instances must produce matching runs.
        first_run = self.algo(population_size=10, seed=self.seed)
        second_run = self.algo(population_size=10, seed=self.seed)
        AlgorithmTestCase.test_algorithm_run(self, first_run, second_run, MyProblem())

    def test_griewank(self):
        first_run = self.algo(population_size=10, seed=self.seed)
        second_run = self.algo(population_size=10, seed=self.seed)
        AlgorithmTestCase.test_algorithm_run(self, first_run, second_run)
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 38.818182 | 88 | 0.757611 |
cb24d43183465c85f011879c4f9d0ef235091087 | 10,784 | py | Python | Project/view/app.py | Bogo56/AdCapture_bot | 7e452561cf564bd702f5fb7ee034a4eea05b0075 | [
"RSA-MD"
] | null | null | null | Project/view/app.py | Bogo56/AdCapture_bot | 7e452561cf564bd702f5fb7ee034a4eea05b0075 | [
"RSA-MD"
] | null | null | null | Project/view/app.py | Bogo56/AdCapture_bot | 7e452561cf564bd702f5fb7ee034a4eea05b0075 | [
"RSA-MD"
] | null | null | null | import time
import os, sys
from kivy.resources import resource_add_path, resource_find
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager,Screen
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from Project.model.model import Model
from Project.modules.controller import CaptureBot
from kivy.config import Config
"""
This is the main module used for managing the Graphical Interface (GUI). It is based on the kivy framework
It connects the User interface of the app with the controller module and it's CaptureBot class
"""
Config.set('graphics', 'width', '800')
Config.set('graphics', 'height', '600')
Config.set('kivy','window_icon','resources/icons/1260673.png')
Config.write()
Builder.load_file("capture_menu.kv")
Builder.load_file("database.kv")
Builder.load_file("add_page.kv")
Builder.load_file("fast_flow.kv")
class MainMenuScreen(Screen):
    # Layout and navigation are defined entirely in the kv files;
    # no Python-side behavior is needed.
    pass
class CaptureMenuScreen(Screen):
    """Screen that builds the list of pages/keywords to capture and runs the bot."""

    # NOTE(review): class-level mutable state — this set is shared by all
    # instances of the screen (kivy typically creates only one).
    pages_list = set()
    added_pages = ObjectProperty(None)

    def add_to_list(self):
        """Validate the id/name form fields and add a (page, id) pair to the list."""
        id = self.ids.id.text
        page = self.ids.page_name.text
        if id and page:
            check_id = self._check_id_input(input=id)
            if check_id:
                id = int(id)
                # Reject duplicates by numeric id.
                if len(self.pages_list) > 0:
                    id_list = [id[1] for id in self.pages_list]
                    if id in id_list:
                        self.ids.scroll_two.text = "ID Already Added"
                        return None
                page_id = (page, id)
                self._add_page_button(page, id)
                self.pages_list.add(page_id)
                self._log_color("black")
                self.ids.scroll_two.text = "Added to List"
                print(page_id)
                print(self.pages_list)
        else:
            self.ids.scroll_two.text = "Must Fill Both Fields"

    def add_from_db(self):
        """Load all stored pages from the database into the capture list."""
        all_pages = Model.get_all()
        # Drop the leading DB column (presumably the row id — TODO confirm).
        all_pages = [page[1:] for page in all_pages]
        for page in all_pages:
            self._add_page_button(page_name=page[0],
                                  page_id=page[1])
            self.pages_list.add(page)
        print(all_pages)

    def remove_button(self, btn):
        """Remove the pressed page button and its entry from the list."""
        btn_id = int(btn.id)
        for item in self.pages_list:
            if btn_id in item:
                page = item
        self.pages_list.discard(page)
        self.added_pages.remove_widget(btn)
        print(self.pages_list)

    def clear_list(self):
        """Empty the capture list and remove all page buttons."""
        self.pages_list.clear()
        self.added_pages.clear_widgets()

    def capture_pages(self):
        """Run the bot over every page in the list with the chosen options."""
        check_options = self._check_options()
        if check_options:
            country = self.ids.country.text
            scrolls = int(self.ids.scroll.text)
            if len(self.pages_list) > 0:
                self._log_color("black")
                self.ids.scroll_two.text = "Started Capturing...."
                pages = list(self.pages_list)
                res = CaptureBot.capture_pages(pages=pages,
                                               country=country,
                                               scrolls=scrolls)
                self.clear_list()
                self.ids.scroll_two.text = res
            else:
                self.ids.scroll_two.text = "Add a Page First"

    def capture_keyword(self):
        """Run the bot for a single keyword search with the chosen options."""
        check_1 = self._check_keyword()
        check_2 = self._check_options_key()
        if check_1 and check_2:
            country = self.ids.key_country.text
            scrolls = int(self.ids.key_scroll.text)
            keyword = str(self.ids.keyword.text)
            res = CaptureBot.capture_keyword(keyword=keyword,
                                             country=country,
                                             scrolls=scrolls)
            self._log_color("black")
            self.ids.scroll_two.text = res
            print(keyword)

    def convert_to_pdf(self):
        """Convert captured screenshots to PDF; returns the bot result tuple."""
        quality = int(self.ids.quality.text)
        if self.ids.folder.text == "":
            folder = None
            default = True
        else:
            folder = self.ids.folder.text
            default = False
        print(folder)
        try:
            res = CaptureBot.to_pdf(default=default,
                                    quality=quality,
                                    specify_folder=folder)
            self._log_color("black")
            self.ids.scroll_two.text = res[0]
            return res
        except:
            # NOTE(review): if to_pdf() itself raises, `res` is unbound here
            # and this handler raises NameError — verify intended behavior.
            self.ids.scroll_two.text = res[0]

    def convert_send(self):
        """Convert to PDF, then email the resulting file."""
        res = self.convert_to_pdf()
        time.sleep(1)
        if res:
            try:
                CaptureBot.send_email(file=res[1])
                self._log_color("black")
                self.ids.scroll_two.text = "Email Sent"
            except:
                self._log_color("red")
                self.ids.scroll_two.text = "Email Failed"

    # Private methods used inside of the other methods
    def _add_page_button(self, page_name, page_id):
        # Create a removable button widget for the page, skipping duplicates.
        page_button = Factory.ListButton(text=f"{page_name}")
        id_list = [id[1] for id in self.pages_list]
        if page_id not in id_list:
            page_button.id = page_id
            page_button.bind(on_press=self.remove_button)
            self.added_pages.add_widget(page_button)

    # Validating options chosen
    def _check_options(self):
        # The spinners still show their placeholder text until a choice is made.
        if self.ids.scroll.text == "Scrolls" or self.ids.country.text == "Country":
            self._log_color("red")
            self.ids.scroll_two.text = "Choose Scroll and Country Option"
            return False
        else:
            return True

    # Validating used for the same things but connected to another widget
    def _check_options_key(self):
        if self.ids.key_scroll.text == "Scrolls" or self.ids.key_country.text == "Country":
            self._log_color("red")
            self.ids.scroll_two.text = "Choose Scroll and Country Option"
            return False
        else:
            return True

    def _check_keyword(self):
        if self.ids.keyword.text == "":
            self._log_color("red")
            self.ids.scroll_two.text = "Enter a Keyword"
            return False
        else:
            return True

    def _check_id_input(self, input):
        # Page ids must be purely numeric.
        if not input.isnumeric():
            self._log_color("red")
            self.ids.scroll_two.text = "Please Enter Only Numbers in the ID Field"
            return False
        else:
            return True

    def _check_quality(self):
        # NOTE(review): this inspects the keyword field, not a quality widget —
        # looks like a copy-paste slip; confirm against the kv layout.
        if self.ids.keyword.text == "":
            self._log_color("red")
            self.ids.scroll_two.text = "Choose quality percent"
            return False
        else:
            return True

    # Changing console message color
    def _log_color(self, color):
        if color == "red":
            self.ids.scroll_two.color = (1, 0, 0, 0.7)
        else:
            self.ids.scroll_two.color = (0, 0, 0, 0.7)
class DataBaseScreen(Screen):
    """Screen for storing the recipient profile (email address + default body)."""

    def add_email(self):
        """Persist the email/body pair entered in the form."""
        if not self._check_field():
            return
        self.ids.profile_log.text = ""
        res = CaptureBot.insert_user_to_db(
            email=self.ids.user_email.text,
            email_body=self.ids.email_body.text,
        )
        self._log_color("black")
        self.ids.profile_log.text = res

    def _check_field(self):
        """Return True when both form fields are non-empty; log an error otherwise."""
        if self.ids.user_email.text == "" or self.ids.email_body.text == "":
            self._log_color("red")
            self.ids.profile_log.text = "Please fill Email and Default Body Fields"
            return False
        return True

    def _log_color(self, color):
        """Switch the log label between alert red and regular black."""
        alert = (1, 0, 0, 0.7)
        regular = (0, 0, 0, 0.8)
        self.ids.profile_log.color = alert if color == "red" else regular
class AddPagesScreen(Screen):
    """Screen for managing the persisted page list (add / search / delete)."""

    def add_page(self):
        """Validate the form and insert a page (name, numeric id) into the DB."""
        if not self._check_field():
            return
        raw_id = self.ids.add_page_id.text
        page = self.ids.add_page_n.text
        if not self._check_id_input(input=raw_id):
            return
        res = CaptureBot.insert_page_to_db(page_id=int(raw_id),
                                           page_name=page)
        self.ids.db_log.text = res

    def find_page(self):
        """Search stored pages by name and print matches to the console widget."""
        matches = CaptureBot.find_page(page_name=self.ids.find_page.text)
        self.ids.db_console.text = "".join(f"{page}\n" for page in matches)

    def delete_all(self):
        """Remove every stored page."""
        res = CaptureBot.delete_all_pages()
        self._log_color("black")
        self.ids.db_log.text = res

    def remove_page(self):
        """Delete a single page by its numeric id."""
        raw_id = self.ids.remove_page.text
        if not self._check_id_input(input=raw_id):
            return
        res = CaptureBot.delete_page(page_id=int(raw_id))
        self._log_color("black")
        self.ids.db_log.text = res

    def _check_field(self):
        """True when both name and id fields are filled; log an error otherwise."""
        if self.ids.add_page_n.text == "" or self.ids.add_page_id.text == "":
            self._log_color("red")
            self.ids.db_log.text = "Please fill Both Fields"
            return False
        return True

    def _check_id_input(self, input):
        """True when the id field is purely numeric; log an error otherwise."""
        if not input.isnumeric():
            self._log_color("red")
            self.ids.db_log.text = "Please Enter Only Numbers in the ID Field"
            return False
        return True

    def _log_color(self, color):
        """Switch the log label between alert red and regular black."""
        self.ids.db_log.color = (1, 0, 0, 0.7) if color == "red" else (0, 0, 0, 0.7)
class FastFlowScreen(Screen):
    """One-click flow: capture every stored page, convert to PDF, and email it."""

    def fast_capture(self):
        page_num = len(Model.get_all())
        res = CaptureBot.capture_from_database()
        self.ids.scroll_text.text = res
        # Scale the PDF quality down as the page count grows, keeping the
        # attachment size manageable: <=7 -> 90, <=14 -> 80, <=21 -> 70, else 60.
        quality = 60
        for limit, value in ((7, 90), (14, 80), (21, 70)):
            if page_num <= limit:
                quality = value
                break
        res = CaptureBot.to_pdf(quality=quality)
        try:
            CaptureBot.send_email(file=res[1])
            self.ids.sent_label.text = "EMAIL SENT"
        except:
            self.ids.sent_label.color = (1, 0, 0, 0.7)
            self.ids.sent_label.text = "EMAIL FAILED"
class ScreenSwitch(ScreenManager):
    # Root widget: screen transitions are wired up in the kv files.
    pass
class XBotApp(App):
    def build(self):
        # Light-grey background for every screen; the ScreenManager defined
        # above is the application's root widget.
        Window.clearcolor = (247/255, 247/255, 247/255, 1)
        return ScreenSwitch()
if __name__ == '__main__':
    # When frozen by PyInstaller, bundled resources are unpacked to
    # sys._MEIPASS; register that directory so kivy can resolve them.
    if hasattr(sys, '_MEIPASS'):
        resource_add_path(os.path.join(sys._MEIPASS))
    XBotApp().run()
6a15b62f0d863535d6708b167aa04b047a174da5 | 229 | py | Python | xylophone/pgw/ConfigurationService.py | Turysaz/pyframework | da44b8127aa6b89d6cdb3bdb564c386520b37e22 | [
"MIT"
] | null | null | null | xylophone/pgw/ConfigurationService.py | Turysaz/pyframework | da44b8127aa6b89d6cdb3bdb564c386520b37e22 | [
"MIT"
] | 6 | 2018-04-09T20:57:14.000Z | 2018-04-09T21:18:12.000Z | xylophone/pgw/ConfigurationService.py | Turysaz/xylophone | da44b8127aa6b89d6cdb3bdb564c386520b37e22 | [
"MIT"
] | null | null | null | # Copyright (c) 2018 Turysaz <turysaz@posteo.org>
from configparser import ConfigParser
def create_configuration_parser():
configuration = ConfigParser()
configuration.read("configuration.txt")
return configuration
| 25.444444 | 49 | 0.777293 |
3c69a3d679dabcf31e945181e234ac233d30f225 | 18,197 | py | Python | saleor/order/models.py | TheSwordBreaker/saleor | 9805909f9fcbda8be99d7836ef87262a07357501 | [
"CC-BY-4.0"
] | null | null | null | saleor/order/models.py | TheSwordBreaker/saleor | 9805909f9fcbda8be99d7836ef87262a07357501 | [
"CC-BY-4.0"
] | null | null | null | saleor/order/models.py | TheSwordBreaker/saleor | 9805909f9fcbda8be99d7836ef87262a07357501 | [
"CC-BY-4.0"
] | null | null | null | from decimal import Decimal
from operator import attrgetter
from re import match
from typing import Optional
from uuid import uuid4
from django.conf import settings
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import JSONField # type: ignore
from django.db.models import F, Max, Sum
from django.utils.timezone import now
from django_measurement.models import MeasurementField
from django_prices.models import MoneyField, TaxedMoneyField
from measurement.measures import Weight
from prices import Money
from ..account.models import Address
from ..channel.models import Channel
from ..core.models import ModelWithMetadata
from ..core.permissions import OrderPermissions
from ..core.taxes import zero_money, zero_taxed_money
from ..core.utils.json_serializer import CustomJsonEncoder
from ..core.weight import WeightUnits, zero_weight
from ..discount.models import Voucher
from ..giftcard.models import GiftCard
from ..payment import ChargeStatus, TransactionKind
from ..shipping.models import ShippingMethod
from . import FulfillmentStatus, OrderEvents, OrderStatus
class OrderQueryset(models.QuerySet):
    """Custom queryset exposing common order-state filters."""

    def get_by_checkout_token(self, token):
        """Return non-draft order with matched checkout token."""
        return self.confirmed().filter(checkout_token=token).first()

    def confirmed(self):
        """Return orders that aren't draft or unconfirmed."""
        return self.exclude(status__in=[OrderStatus.DRAFT, OrderStatus.UNCONFIRMED])

    def non_draft(self):
        """Return orders that aren't draft."""
        return self.exclude(status=OrderStatus.DRAFT)

    def drafts(self):
        """Return draft orders."""
        return self.filter(status=OrderStatus.DRAFT)

    def ready_to_fulfill(self):
        """Return orders that can be fulfilled.

        Orders ready to fulfill are fully paid but unfulfilled (or partially
        fulfilled).
        """
        statuses = {OrderStatus.UNFULFILLED, OrderStatus.PARTIALLY_FULFILLED}
        qs = self.filter(status__in=statuses, payments__is_active=True)
        qs = qs.annotate(amount_paid=Sum("payments__captured_amount"))
        return qs.filter(total_gross_amount__lte=F("amount_paid"))

    def ready_to_capture(self):
        """Return orders with payments to capture.

        Orders ready to capture are those which are not draft or canceled and
        have a preauthorized payment. The preauthorized payment can not
        already be partially or fully captured.
        """
        qs = self.filter(
            payments__is_active=True, payments__charge_status=ChargeStatus.NOT_CHARGED
        )
        # BUG FIX: previously ``exclude(status={...})`` performed an exact
        # lookup against a set, which can never match a CharField value, so
        # draft/canceled orders were not actually excluded. ``status__in``
        # is the intended lookup.
        qs = qs.exclude(status__in=[OrderStatus.DRAFT, OrderStatus.CANCELED])
        return qs.distinct()

    def ready_to_confirm(self):
        """Return unconfirmed_orders."""
        return self.filter(status=OrderStatus.UNCONFIRMED)
class Order(ModelWithMetadata):
    # --- lifecycle / ownership -------------------------------------------
    created = models.DateTimeField(default=now, editable=False)
    status = models.CharField(
        max_length=32, default=OrderStatus.UNFULFILLED, choices=OrderStatus.CHOICES
    )
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        related_name="orders",
        on_delete=models.SET_NULL,
    )
    language_code = models.CharField(max_length=35, default=settings.LANGUAGE_CODE)
    tracking_client_id = models.CharField(max_length=36, blank=True, editable=False)

    # --- addresses & contact ---------------------------------------------
    billing_address = models.ForeignKey(
        Address, related_name="+", editable=False, null=True, on_delete=models.SET_NULL
    )
    shipping_address = models.ForeignKey(
        Address, related_name="+", editable=False, null=True, on_delete=models.SET_NULL
    )
    # Guest-checkout email; account orders read the email from `user`.
    user_email = models.EmailField(blank=True, default="")
    currency = models.CharField(max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,)

    # --- shipping ---------------------------------------------------------
    shipping_method = models.ForeignKey(
        ShippingMethod,
        blank=True,
        null=True,
        related_name="orders",
        on_delete=models.SET_NULL,
    )
    # Snapshot of the method name at order time (method may be deleted later).
    shipping_method_name = models.CharField(
        max_length=255, null=True, default=None, blank=True, editable=False
    )
    channel = models.ForeignKey(
        Channel, related_name="orders", on_delete=models.PROTECT,
    )
    shipping_price_net_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
        editable=False,
    )
    shipping_price_net = MoneyField(
        amount_field="shipping_price_net_amount", currency_field="currency"
    )
    shipping_price_gross_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
        editable=False,
    )
    shipping_price_gross = MoneyField(
        amount_field="shipping_price_gross_amount", currency_field="currency"
    )
    shipping_price = TaxedMoneyField(
        net_amount_field="shipping_price_net_amount",
        gross_amount_field="shipping_price_gross_amount",
        currency_field="currency",
    )

    # --- identity tokens --------------------------------------------------
    # Random UUID assigned on first save (see save()).
    token = models.CharField(max_length=36, unique=True, blank=True)
    # Token of a checkout instance that this order was created from
    checkout_token = models.CharField(max_length=36, blank=True)

    # --- totals -----------------------------------------------------------
    total_net_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
    )
    total_net = MoneyField(amount_field="total_net_amount", currency_field="currency")
    total_gross_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
    )
    total_gross = MoneyField(
        amount_field="total_gross_amount", currency_field="currency"
    )
    total = TaxedMoneyField(
        net_amount_field="total_net_amount",
        gross_amount_field="total_gross_amount",
        currency_field="currency",
    )

    # --- discounts & extras ----------------------------------------------
    voucher = models.ForeignKey(
        Voucher, blank=True, null=True, related_name="+", on_delete=models.SET_NULL
    )
    gift_cards = models.ManyToManyField(GiftCard, blank=True, related_name="orders")
    discount_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0,
    )
    discount = MoneyField(amount_field="discount_amount", currency_field="currency")
    discount_name = models.CharField(max_length=255, blank=True, null=True)
    translated_discount_name = models.CharField(max_length=255, blank=True, null=True)
    display_gross_prices = models.BooleanField(default=True)
    customer_note = models.TextField(blank=True, default="")
    weight = MeasurementField(
        measurement=Weight, unit_choices=WeightUnits.CHOICES, default=zero_weight
    )
    redirect_url = models.URLField(blank=True, null=True)
    objects = OrderQueryset.as_manager()

    class Meta:
        ordering = ("-pk",)
        permissions = ((OrderPermissions.MANAGE_ORDERS.codename, "Manage orders."),)
def save(self, *args, **kwargs):
    # Lazily assign a random UUID token on first save so every order has
    # a unique public identifier.
    if not self.token:
        self.token = str(uuid4())
    return super().save(*args, **kwargs)
def is_fully_paid(self):
    """Return True when captured payments cover the order's gross total."""
    return self._total_paid().gross >= self.total.gross
def is_partly_paid(self):
    """Return True when any positive amount has been captured for this order."""
    captured = self._total_paid()
    return captured.gross.amount > 0
def get_customer_email(self):
return self.user.email if self.user else self.user_email
def _total_paid(self):
# Get total paid amount from partially charged,
# fully charged and partially refunded payments
payments = self.payments.filter(
charge_status__in=[
ChargeStatus.PARTIALLY_CHARGED,
ChargeStatus.FULLY_CHARGED,
ChargeStatus.PARTIALLY_REFUNDED,
]
)
total_captured = [payment.get_captured_amount() for payment in payments]
total_paid = sum(total_captured, zero_taxed_money(currency=self.currency))
return total_paid
def _index_billing_phone(self):
return self.billing_address.phone
def _index_shipping_phone(self):
return self.shipping_address.phone
def __iter__(self):
return iter(self.lines.all())
def __repr__(self):
return "<Order #%r>" % (self.id,)
def __str__(self):
return "#%d" % (self.id,)
def get_last_payment(self):
return max(self.payments.all(), default=None, key=attrgetter("pk"))
def get_payment_status(self):
last_payment = self.get_last_payment()
if last_payment:
return last_payment.charge_status
return ChargeStatus.NOT_CHARGED
def get_payment_status_display(self):
last_payment = self.get_last_payment()
if last_payment:
return last_payment.get_charge_status_display()
return dict(ChargeStatus.CHOICES).get(ChargeStatus.NOT_CHARGED)
def is_pre_authorized(self):
return (
self.payments.filter(
is_active=True,
transactions__kind=TransactionKind.AUTH,
transactions__action_required=False,
)
.filter(transactions__is_success=True)
.exists()
)
def is_captured(self):
return (
self.payments.filter(
is_active=True,
transactions__kind=TransactionKind.CAPTURE,
transactions__action_required=False,
)
.filter(transactions__is_success=True)
.exists()
)
@property
def quantity_fulfilled(self):
return sum([line.quantity_fulfilled for line in self])
def is_shipping_required(self):
return any(line.is_shipping_required for line in self)
def get_subtotal(self):
subtotal_iterator = (line.get_total() for line in self)
return sum(subtotal_iterator, zero_taxed_money(currency=self.currency))
def get_total_quantity(self):
return sum([line.quantity for line in self])
def is_draft(self):
return self.status == OrderStatus.DRAFT
def is_open(self):
statuses = {OrderStatus.UNFULFILLED, OrderStatus.PARTIALLY_FULFILLED}
return self.status in statuses
def can_cancel(self):
return (
not self.fulfillments.exclude(status=FulfillmentStatus.CANCELED).exists()
) and self.status not in {OrderStatus.CANCELED, OrderStatus.DRAFT}
def can_capture(self, payment=None):
if not payment:
payment = self.get_last_payment()
if not payment:
return False
order_status_ok = self.status not in {OrderStatus.DRAFT, OrderStatus.CANCELED}
return payment.can_capture() and order_status_ok
def can_void(self, payment=None):
if not payment:
payment = self.get_last_payment()
if not payment:
return False
return payment.can_void()
def can_refund(self, payment=None):
if not payment:
payment = self.get_last_payment()
if not payment:
return False
return payment.can_refund()
def can_mark_as_paid(self):
return len(self.payments.all()) == 0
@property
def total_authorized(self):
payment = self.get_last_payment()
if payment:
return payment.get_authorized_amount()
return zero_money(self.currency)
@property
def total_captured(self):
payment = self.get_last_payment()
if payment and payment.charge_status in (
ChargeStatus.PARTIALLY_CHARGED,
ChargeStatus.FULLY_CHARGED,
ChargeStatus.PARTIALLY_REFUNDED,
):
return Money(payment.captured_amount, payment.currency)
return zero_money(self.currency)
@property
def total_balance(self):
return self.total_captured - self.total.gross
def get_total_weight(self, *_args):
return self.weight
class OrderLineQueryset(models.QuerySet):
def digital(self):
"""Return lines with digital products."""
for line in self.all():
if line.is_digital:
yield line
def physical(self):
"""Return lines with physical products."""
for line in self.all():
if not line.is_digital:
yield line
class OrderLine(models.Model):
order = models.ForeignKey(
Order, related_name="lines", editable=False, on_delete=models.CASCADE
)
variant = models.ForeignKey(
"product.ProductVariant",
related_name="order_lines",
on_delete=models.SET_NULL,
blank=True,
null=True,
)
# max_length is as produced by ProductVariant's display_product method
product_name = models.CharField(max_length=386)
variant_name = models.CharField(max_length=255, default="", blank=True)
translated_product_name = models.CharField(max_length=386, default="", blank=True)
translated_variant_name = models.CharField(max_length=255, default="", blank=True)
product_sku = models.CharField(max_length=255)
is_shipping_required = models.BooleanField()
quantity = models.IntegerField(validators=[MinValueValidator(1)])
quantity_fulfilled = models.IntegerField(
validators=[MinValueValidator(0)], default=0
)
currency = models.CharField(max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,)
unit_price_net_amount = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
)
unit_price_net = MoneyField(
amount_field="unit_price_net_amount", currency_field="currency"
)
unit_price_gross_amount = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
)
unit_price_gross = MoneyField(
amount_field="unit_price_gross_amount", currency_field="currency"
)
unit_price = TaxedMoneyField(
net_amount_field="unit_price_net_amount",
gross_amount_field="unit_price_gross_amount",
currency="currency",
)
tax_rate = models.DecimalField(
max_digits=5, decimal_places=2, default=Decimal("0.0")
)
objects = OrderLineQueryset.as_manager()
class Meta:
ordering = ("pk",)
def __str__(self):
return (
f"{self.product_name} ({self.variant_name})"
if self.variant_name
else self.product_name
)
def get_total(self):
return self.unit_price * self.quantity
@property
def quantity_unfulfilled(self):
return self.quantity - self.quantity_fulfilled
@property
def is_digital(self) -> Optional[bool]:
"""Check if a variant is digital and contains digital content."""
if not self.variant:
return None
is_digital = self.variant.is_digital()
has_digital = hasattr(self.variant, "digital_content")
return is_digital and has_digital
class Fulfillment(ModelWithMetadata):
fulfillment_order = models.PositiveIntegerField(editable=False)
order = models.ForeignKey(
Order, related_name="fulfillments", editable=False, on_delete=models.CASCADE
)
status = models.CharField(
max_length=32,
default=FulfillmentStatus.FULFILLED,
choices=FulfillmentStatus.CHOICES,
)
tracking_number = models.CharField(max_length=255, default="", blank=True)
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ("pk",)
def __str__(self):
return f"Fulfillment #{self.composed_id}"
def __iter__(self):
return iter(self.lines.all())
def save(self, *args, **kwargs):
"""Assign an auto incremented value as a fulfillment order."""
if not self.pk:
groups = self.order.fulfillments.all()
existing_max = groups.aggregate(Max("fulfillment_order"))
existing_max = existing_max.get("fulfillment_order__max")
self.fulfillment_order = existing_max + 1 if existing_max is not None else 1
return super().save(*args, **kwargs)
@property
def composed_id(self):
return "%s-%s" % (self.order.id, self.fulfillment_order)
def can_edit(self):
return self.status != FulfillmentStatus.CANCELED
def get_total_quantity(self):
return sum([line.quantity for line in self])
@property
def is_tracking_number_url(self):
return bool(match(r"^[-\w]+://", self.tracking_number))
class FulfillmentLine(models.Model):
order_line = models.ForeignKey(
OrderLine, related_name="fulfillment_lines", on_delete=models.CASCADE
)
fulfillment = models.ForeignKey(
Fulfillment, related_name="lines", on_delete=models.CASCADE
)
quantity = models.PositiveIntegerField()
stock = models.ForeignKey(
"warehouse.Stock",
related_name="fulfillment_lines",
on_delete=models.SET_NULL,
blank=True,
null=True,
)
class OrderEvent(models.Model):
"""Model used to store events that happened during the order lifecycle.
Args:
parameters: Values needed to display the event on the storefront
type: Type of an order
"""
date = models.DateTimeField(default=now, editable=False)
type = models.CharField(
max_length=255,
choices=[
(type_name.upper(), type_name) for type_name, _ in OrderEvents.CHOICES
],
)
order = models.ForeignKey(Order, related_name="events", on_delete=models.CASCADE)
parameters = JSONField(blank=True, default=dict, encoder=CustomJsonEncoder)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="+",
)
class Meta:
ordering = ("date",)
def __repr__(self):
return f"{self.__class__.__name__}(type={self.type!r}, user={self.user!r})"
| 33.82342 | 88 | 0.678903 |
e5c669c7e56dd7feb04561c5ab7bb8dd365e40e0 | 2,287 | py | Python | node/test_mc.py | mediachainlabs/cccoin | 95a7817e02011344508ce0af6d291dff788d23b1 | [
"MIT"
] | 37 | 2017-03-09T19:29:57.000Z | 2021-05-07T12:41:56.000Z | node/test_mc.py | mediachainlabs/cccoin | 95a7817e02011344508ce0af6d291dff788d23b1 | [
"MIT"
] | 8 | 2017-03-08T21:24:07.000Z | 2017-03-09T21:03:04.000Z | node/test_mc.py | mediachainlabs/cccoin | 95a7817e02011344508ce0af6d291dff788d23b1 | [
"MIT"
] | 16 | 2017-03-09T19:44:35.000Z | 2022-01-23T09:09:20.000Z | import pytest
from subprocess import Popen
from signal import SIGTERM
from node_mc import MediachainQueue, MediachainClient
from node_core import client_post
import bitcoin as btc
from time import sleep
import shutil
from utils import find_open_port
from tornado.ioloop import IOLoop
# test that CCCoinCore writes to mediachain successfully
# Must have `mcnode` on your $PATH for this to work
# pytest fixture configuration (see conftest.py)
contract_args = {'start_at_current_block': True, 'settings_confirm_states': {'BLOCKCHAIN_CONFIRMED':1}}
@pytest.fixture
def mc_node_url(tmpdir):
"""
create an ephemeral mcnode that we can write to from CCCoin
:param tmpdir: pytest tmpdir fixture (for storing node data)
:param find_open_port: pytest fixture function we can call to find an open tcp port
:return:
"""
p2p_port = str(find_open_port())
control_port = str(find_open_port())
data_dir = str(tmpdir)
mcnode_process = Popen(["mcnode", "-d", data_dir, "-l", p2p_port, "-c", control_port])
mc_url = 'http://localhost:' + control_port
sleep(0.2)
yield mc_url
# cleanup
mcnode_process.send_signal(SIGTERM)
mcnode_process.wait()
shutil.rmtree(data_dir)
def test_cccoin_mc_write(mc_node_url, cccoin_core):
cccoin_core.mcq = MediachainQueue(mc_api_url=mc_node_url, default_namespace='cccoin')
the_pw = 'some big long brainwallet password'
priv = btc.sha256(the_pw)
pub = btc.privtopub(priv)
blind_post, unblind_post = client_post('http://foo',
'The Title ',
priv,
pub,
)
cccoin_core.submit_blind_action(blind_post)
cccoin_core.submit_unblind_action(unblind_post)
cccoin_core.cw.loop_once()
start_block = cccoin_core.cw.latest_block_num
while cccoin_core.cw.latest_block_num < start_block + 3: # not sure why / if this is the magic number, but it WFM...
cccoin_core.cw.loop_once()
sleep(0.1)
cccoin_core.mcq.wait_for_completion()
client = MediachainClient(mc_api_url=mc_node_url)
results = IOLoop.current().run_sync(lambda: client.query('SELECT * FROM cccoin'))
assert(len(results) > 0)
| 34.651515 | 120 | 0.686926 |
b3320b81a0876a033cd7c9cc733e7e44eb1b04ef | 6,925 | py | Python | packages/augur-core/tests/mock_templates/__init__.py | jalextowle/augur | 821653823438fd9d20eced2221c8f99f21606a02 | [
"MIT"
] | 885 | 2015-01-11T20:51:30.000Z | 2022-03-25T00:29:59.000Z | packages/augur-core/tests/mock_templates/__init__.py | jalextowle/augur | 821653823438fd9d20eced2221c8f99f21606a02 | [
"MIT"
] | 7,347 | 2015-01-17T01:05:24.000Z | 2021-11-02T17:28:19.000Z | packages/augur-core/tests/mock_templates/__init__.py | jalextowle/augur | 821653823438fd9d20eced2221c8f99f21606a02 | [
"MIT"
] | 283 | 2015-01-30T02:16:53.000Z | 2022-03-24T19:23:08.000Z | from os import path
from textwrap import dedent
from solc import compile_standard
from specifics import add_all
# TODO resolve relative source paths from the sources directory, not this directory
# used to resolve relative paths
BASE_PATH = path.dirname(path.abspath(__file__))
def resolve_relative_path(relativeFilePath):
return path.abspath(path.join(BASE_PATH, relativeFilePath))
COMPILATION_CACHE = resolve_relative_path('./compilation_cache')
class ContractDescription(object):
def __init__(self, contract_name, solidity_version):
self.name = contract_name
self.version = solidity_version
self.imports = set()
self.variables = {}
self.functions = {}
self.events = set()
@classmethod
def from_abi(cls, solidity_version, contract_name, abi):
self = cls(contract_name, solidity_version)
for thing in abi:
# print json.dumps(thing, indent=2, separators=',:')
type_ = thing['type']
if type_ == 'constructor':
inputs = thing['inputs']
state_mutability = thing['stateMutability'] # TODO can be public or internal
payable = thing['payable'] # TODO constructor can be payable
constructor = cls.make_constructor(inputs)
self.functions[''] = constructor
elif type_ == 'function':
name = thing['name']
inputs = thing['inputs']
outputs = thing['outputs']
state_mutability = thing['stateMutability']
constant = thing['constant'] # TODO how does this relate to stateMutability?
payable = thing['payable'] # TODO how does this relate to stateMutability?
new_variables, new_functions = cls.make_function(name, inputs, outputs, state_mutability)
self.variables.update(new_variables)
self.functions.update(new_functions)
elif type_ == 'event':
name = thing['name']
inputs = thing['inputs']
anonymous = thing['anonymous'] # TODO is this useful when we know 'name'?
event = cls.make_event(name, inputs)
self.events.add(event)
else:
raise ValueError('Unexpected abi type "{}" in: {}'.format(type_, abi))
return self
@staticmethod
def make_version(version):
return 'pragma solidity {};'.format(version)
@staticmethod
def make_event(name, inputs):
params = ', '.join('{} {}'.format(i['type'], i['name']) for i in inputs)
return "event {name}({params});".format(name=name, params=params)
@staticmethod
def make_constructor(inputs):
params = ', '.join('{} {}'.format(ContractDescription.get_type_with_storage(i['type']), i['name']) for i in inputs)
return "constructor({params}) public {{ }}".format(
params=params
)
@staticmethod
def make_function(function_name, inputs, outputs, state_mutability):
var_descriptions = [
{'name': 'mock_{}_{}_{}'.format(
function_name,
o['name'] or index,
'_'.join([t['type'] for t in inputs]).replace('[', '_').replace(']', '_')
),
'type': o['type']}
for index, o in enumerate(outputs)
]
functions = {}
params = ', '.join('{} {}'.format(ContractDescription.get_type_with_storage(i['type']), i['name']) for i in inputs)
returns_header = ', '.join('{} {}'.format(ContractDescription.get_type_with_storage(o['type']), o['name']) for o in outputs)
returns = ','.join(v['name'] for v in var_descriptions)
mutability = "" if state_mutability == "nonpayable" else state_mutability
mutability = "" if mutability == "pure" else mutability # TODO handle pure fns
functions[function_name] = (dedent("""\
function {name}({params}) public {mutability} returns ({returns_header}) {{
return ({returns});
}}
""".format(
name=function_name,
params=params,
mutability=mutability,
returns_header=returns_header,
returns=returns
)))
variables = {}
for v in var_descriptions:
functions[v['name']] = dedent("""\
function set_{name}({vartype} thing) public {{
{name} = thing;
}}
""".format(
name=v['name'],
vartype=v['type']
))
variables[v['name']] = '{vartype} private {name};'.format(name=v['name'], vartype=v['type'])
return variables, functions
@staticmethod
def get_type_with_storage(type_string):
if type_string in ['string', 'uint256[]', 'bytes32[]']:
return type_string + ' memory'
return type_string
def write(self, test_dir):
with open('{}/{}.sol'.format(test_dir, self.name), 'w') as f:
f.write(self.render())
def render(self):
source = self.make_version(self.version)
source += '\n\n'
source += '\n'.join("import 'ROOT/{}';".format(imp) for imp in self.imports)
source += '\n'
source += "contract {name} {{\n".format(name=self.name)
source += '\n'
source += '\n'.join(self.events)
source += '\n'
source += '\n'.join(self.variables.values())
source += '\n\n'
source += '\n'.join(self.functions.values())
source += '}'
return source
def generate_mock_contracts(solidity_version, contracts):
return add_all({
'Mock{}'.format(name): ContractDescription.from_abi(solidity_version, 'Mock{}'.format(name), abi)
for name, abi
in contracts.items()
if len(abi) != 0
})
def compile_contract(source_filepath, outputs, contracts_path, test_contracts_path):
compiler_parameter = {
'language': 'Solidity',
'sources': {
source_filepath: {
'urls': [source_filepath]
}
},
'settings': {
# TODO: Remove 'remappings' line below and update 'sources' line above
'remappings': [
'=%s/' % contracts_path,
],
'optimizer': {
'enabled': True,
'runs': 200
},
'outputSelection': {
"*": {
'*': outputs
}
}
}
}
if test_contracts_path:
# TODO: Remove 'remappings' line below and update 'sources' line above
compiler_parameter['settings']['remappings'].append(
'TEST=%s/' % test_contracts_path
)
return compile_standard(compiler_parameter, allow_paths=resolve_relative_path("../../"))
| 36.835106 | 132 | 0.560144 |
6a14b0cf7ac498415af478a3d63bf7ce60033078 | 633 | py | Python | sector/migrations/0002_industrieslandingpage_hero_image.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
] | 1 | 2019-01-18T03:50:46.000Z | 2019-01-18T03:50:46.000Z | sector/migrations/0002_industrieslandingpage_hero_image.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
] | 50 | 2018-01-24T18:04:08.000Z | 2019-01-03T03:30:30.000Z | sector/migrations/0002_industrieslandingpage_hero_image.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
] | 2 | 2018-02-12T15:20:52.000Z | 2019-01-18T03:51:52.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-05 20:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0019_delete_filter'),
('sector', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='SectorLandingPage',
name='hero_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
]
| 27.521739 | 149 | 0.655608 |
f1fc7ef0a67564fd7cea2708c801c2eec0309cea | 7,479 | py | Python | official/projects/volumetric_models/modeling/heads/segmentation_heads_3d.py | mcasanova1445/models | 37be0fdb4abccca633bb3199a4e6f3f71cd174d9 | [
"Apache-2.0"
] | 1 | 2020-09-14T10:46:07.000Z | 2020-09-14T10:46:07.000Z | official/projects/volumetric_models/modeling/heads/segmentation_heads_3d.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 8 | 2020-05-19T00:52:30.000Z | 2020-06-04T23:57:20.000Z | official/projects/volumetric_models/modeling/heads/segmentation_heads_3d.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 2 | 2021-10-07T04:47:04.000Z | 2021-12-18T04:18:19.000Z | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segmentation heads."""
from typing import Any, Union, Sequence, Mapping, Tuple
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class SegmentationHead3D(tf.keras.layers.Layer):
"""Segmentation head for 3D input."""
def __init__(self,
num_classes: int,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
upsample_factor: int = 1,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_batch_normalization: bool = False,
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
bias_regularizer: tf.keras.regularizers.Regularizer = None,
output_logits: bool = True, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initialize params to build segmentation head.
Args:
num_classes: `int` number of mask classification categories. The number of
classes does not include background class.
level: `int` or `str`, level to use to build segmentation head.
num_convs: `int` number of stacked convolution before the last prediction
layer.
num_filters: `int` number to specify the number of filters used. Default
is 256.
upsample_factor: `int` number to specify the upsampling factor to generate
finer mask. Default 1 means no upsampling is applied.
activation: `string`, indicating which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: `bool`, whether to use synchronized batch normalization
across different replicas.
norm_momentum: `float`, the momentum parameter of the normalization
layers.
norm_epsilon: `float`, the epsilon parameter of the normalization layers.
use_batch_normalization: A bool of whether to use batch normalization or
not.
kernel_regularizer: `tf.keras.regularizers.Regularizer` object for layer
kernel.
bias_regularizer: `tf.keras.regularizers.Regularizer` object for bias.
output_logits: A `bool` of whether to output logits or not. Default
is True. If set to False, output softmax.
**kwargs: other keyword arguments passed to Layer.
"""
super(SegmentationHead3D, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'level': level,
'num_convs': num_convs,
'num_filters': num_filters,
'upsample_factor': upsample_factor,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'use_batch_normalization': use_batch_normalization,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'output_logits': output_logits
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape: Union[tf.TensorShape, Sequence[tf.TensorShape]]):
"""Creates the variables of the segmentation head."""
conv_op = tf.keras.layers.Conv3D
conv_kwargs = {
'kernel_size': (3, 3, 3),
'padding': 'same',
'use_bias': False,
'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=0.01),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
}
final_kernel_size = (1, 1, 1)
bn_op = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn'] else
tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
# Segmentation head layers.
self._convs = []
self._norms = []
for i in range(self._config_dict['num_convs']):
conv_name = 'segmentation_head_conv_{}'.format(i)
self._convs.append(
conv_op(
name=conv_name,
filters=self._config_dict['num_filters'],
**conv_kwargs))
norm_name = 'segmentation_head_norm_{}'.format(i)
if self._config_dict['use_batch_normalization']:
self._norms.append(bn_op(name=norm_name, **bn_kwargs))
self._classifier = conv_op(
name='segmentation_output',
filters=self._config_dict['num_classes'],
kernel_size=final_kernel_size,
padding='valid',
activation=None,
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
super(SegmentationHead3D, self).build(input_shape)
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]]):
"""Forward pass of the segmentation head.
It supports both a tuple of 2 tensors or 2 dictionaries. The first is
backbone endpoints, and the second is decoder endpoints. When inputs are
tensors, they are from a single level of feature maps. When inputs are
dictionaries, they contain multiple levels of feature maps, where the key
is the index of feature map.
Args:
inputs: A tuple of 2 feature map tensors of shape
[batch, height_l, width_l, channels] or 2 dictionaries of tensors:
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
The first is backbone endpoints, and the second is decoder endpoints.
Returns:
segmentation prediction mask: A `tf.Tensor` of the segmentation mask
scores predicted from input features.
"""
decoder_output = inputs[1]
x = decoder_output[str(self._config_dict['level'])] if isinstance(
decoder_output, dict) else decoder_output
for i, conv in enumerate(self._convs):
x = conv(x)
if self._norms:
x = self._norms[i](x)
x = self._activation(x)
x = tf.keras.layers.UpSampling3D(size=self._config_dict['upsample_factor'])(
x)
x = self._classifier(x)
return x if self._config_dict['output_logits'] else tf.keras.layers.Softmax(
dtype='float32')(
x)
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]):
return cls(**config)
| 40.209677 | 101 | 0.667068 |
71f89c57b26393cedeca4f1fedab499a328747ac | 2,998 | py | Python | openGaussBase/testcase/GUC/FILELOCATION/Opengauss_Function_Guc_FileLocation_Case0062.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/GUC/FILELOCATION/Opengauss_Function_Guc_FileLocation_Case0062.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/GUC/FILELOCATION/Opengauss_Function_Guc_FileLocation_Case0062.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC
Case Name : external_pid_file参数使用gs_guc set设置为空值
Description :
1、查看external_pid_file默认值;
source /opt/opengauss810/env
gs_guc check -D {cluster/dn1} -c external_pid_file
2、使用gs_guc set设置external_pid_file到问题路径下
gs_guc set -D {cluster/dn1} -c "external_pid_file="
3、重启使其生效,观察预期结果
gs_om -t stop && gs_om -t start
Expect :
1、显示默认值;
2、参数修改成功;
3、重启成功,预期结果正常。
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
COMMONSH = CommonSH('PrimaryDbUser')
class GucTest(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.constant = Constant()
self.log.info('==Guc_FileLocation_Case0062开始==')
self.db_user_node = Node(node='PrimaryDbUser')
status = COMMONSH.get_db_cluster_status()
self.assertTrue("Normal" in status or "Degraded" in status)
def test_startdb(self):
self.log.info("查询该参数默认值")
check = COMMONSH.execut_db_sql(f'''show external_pid_file;''')
self.log.info(check)
self.assertEqual("", check.split("\n")[-2].strip())
self.log.info("external_pid_file参数使用gs_guc set设置为空值")
gucsetcmd = '''source ''' + macro.DB_ENV_PATH \
+ ''';gs_guc set -D ''' + macro.DB_INSTANCE_PATH \
+ ''' -c "external_pid_file=' '"'''
self.log.info(gucsetcmd)
gucresult = self.db_user_node.sh(gucsetcmd).result()
self.assertIn('Success', gucresult)
self.log.info("重启数据库校验预期结果")
COMMONSH.restart_db_cluster()
result = COMMONSH.get_db_cluster_status()
self.assertTrue("Normal" in result or "Degraded" in result)
def tearDown(self):
self.log.info("恢复该参数默认值")
gucsetcmd = '''source ''' + macro.DB_ENV_PATH \
+ ''';gs_guc set -D ''' + macro.DB_INSTANCE_PATH \
+ ''' -c "external_pid_file=\'\'"'''
gucresult = self.db_user_node.sh(gucsetcmd).result()
self.log.info(gucresult)
COMMONSH.restart_db_cluster()
status = COMMONSH.get_db_cluster_status()
self.assertTrue("Normal" in status or "Degraded" in status)
self.log.info('==Guc_FileLocation_Case0062完成==')
| 35.690476 | 84 | 0.640093 |
6beee3608fcab1b76b6d81312daba15bf32dac19 | 1,310 | py | Python | naiveBayes.py | sayannath/Naive-Bayes-Theorem | e0bde1237ff7c3fb65ba15d7aa357493757f520b | [
"Apache-2.0"
] | 2 | 2019-07-20T20:02:25.000Z | 2021-03-24T15:46:36.000Z | naiveBayes.py | sayannath/Naive-Bayes-Theorem | e0bde1237ff7c3fb65ba15d7aa357493757f520b | [
"Apache-2.0"
] | null | null | null | naiveBayes.py | sayannath/Naive-Bayes-Theorem | e0bde1237ff7c3fb65ba15d7aa357493757f520b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
# In[2]:
print(os.listdir())
# In[3]:
# importing the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#to get graphs inline
get_ipython().run_line_magic('matplotlib', 'inline')
# In[4]:
dataSet = pd.read_csv('Social_Network_Ads.csv')
# In[5]:
dataSet.info()
# In[6]:
dataSet.head()
# In[7]:
# spliting data into dependent and independent matrix
X = dataSet.iloc[:,2:4].values
y = dataSet.iloc[:,4].values
# In[8]:
X
# In[9]:
y
# In[10]:
# splitting data into train and test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
# In[11]:
X_test
# In[12]:
y_test
# In[13]:
# feature scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# In[14]:
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# In[15]:
from sklearn.naive_bayes import GaussianNB
# In[16]:
classifier = GaussianNB()
# In[17]:
classifier.fit(X_train, y_train)
# In[18]:
y_predict = classifier.predict(X_test)
# In[19]:
y_predict
# In[20]:
from sklearn.metrics import confusion_matrix
falseCaci = confusion_matrix(y_test, y_predict)
# In[21]:
falseCaci
| 9.160839 | 74 | 0.679389 |
ad34a4a5cb58171446c5e3efd06c8ded67db6c33 | 467 | py | Python | .base_lessons/File input output/Read file/read_file.py | dannymeijer/level-up-with-python | 1bd1169aafd0fdc124984c30edc7f0153626cf06 | [
"MIT"
] | null | null | null | .base_lessons/File input output/Read file/read_file.py | dannymeijer/level-up-with-python | 1bd1169aafd0fdc124984c30edc7f0153626cf06 | [
"MIT"
] | null | null | null | .base_lessons/File input output/Read file/read_file.py | dannymeijer/level-up-with-python | 1bd1169aafd0fdc124984c30edc7f0153626cf06 | [
"MIT"
] | null | null | null |
f = open("input.txt", "r") # here we open file "input.txt". Second argument used to identify that we want to read file
# Note: if you want to write to the file use "w" as second argument
for line in f.readlines(): # read lines
print each line
f.close() # It's important to close the file to free up any system resources.
f1 = open("input1.txt", "r")
print only first line of f1
do not forget to close file
| 31.133333 | 120 | 0.625268 |
79afdf09d0219e44d4b64bf794b7f9ab5c9424d0 | 14,366 | py | Python | _unittests/ut_mlmodel/test_extended_features.py | sdpython/mlinsights | bae59cda775a69bcce83b16b88df2f34a092cb60 | [
"MIT"
] | 48 | 2017-11-19T14:59:41.000Z | 2022-03-03T15:50:24.000Z | _unittests/ut_mlmodel/test_extended_features.py | sdpython/mlinsights | bae59cda775a69bcce83b16b88df2f34a092cb60 | [
"MIT"
] | 87 | 2017-11-20T00:10:32.000Z | 2021-11-20T01:48:09.000Z | _unittests/ut_mlmodel/test_extended_features.py | sdpython/mlinsights | bae59cda775a69bcce83b16b88df2f34a092cb60 | [
"MIT"
] | 12 | 2019-05-09T07:45:52.000Z | 2021-06-28T06:55:53.000Z | # -*- coding: utf-8 -*-
"""
@brief test log(time=2s)
"""
import unittest
import numpy
from scipy import sparse
from scipy.sparse import random as sparse_random
from sklearn.preprocessing import PolynomialFeatures
from pyquickhelper.pycode import ExtTestCase
from mlinsights.mlmodel import ExtendedFeatures
class TestExtendedFeatures(ExtTestCase):
    """Verifies that :class:`mlinsights.mlmodel.ExtendedFeatures` reproduces
    :class:`sklearn.preprocessing.PolynomialFeatures` exactly — same expanded
    features, same shapes, same feature names — for both the default (fast)
    implementation and the ``kind='poly-slow'`` reference implementation,
    across the bias / no-bias and interaction-only option combinations."""
    def test_multiply(self):
        """Sanity-check numpy broadcasting ((3,3) * (3,1)) used by the expansion."""
        x1 = numpy.arange(9.0).reshape((3, 3))
        x2 = numpy.arange(3.0).reshape((3, 1))
        r = numpy.multiply(x1, x2)
        exp = numpy.array([[0., 0., 0.], [3., 4., 5.], [12., 14., 16.]])
        self.assertEqual(r, exp)
    def test_polynomial_features(self):
        """ExtendedFeatures == PolynomialFeatures with bias, full polynomial."""
        # Expected expansions built by hand: degree 3 on one column,
        # degree 2 on two columns (all monomials x1^i * x2^j with i+j <= 2).
        X1 = numpy.arange(6)[:, numpy.newaxis]
        P1 = numpy.hstack([numpy.ones_like(X1),
                           X1, X1 ** 2, X1 ** 3])
        deg1 = 3
        X2 = numpy.arange(6).reshape((3, 2))
        x1 = X2[:, :1]
        x2 = X2[:, 1:]
        P2 = numpy.hstack([x1 ** 0 * x2 ** 0,
                           x1 ** 1 * x2 ** 0,
                           x1 ** 0 * x2 ** 1,
                           x1 ** 2 * x2 ** 0,
                           x1 ** 1 * x2 ** 1,
                           x1 ** 0 * x2 ** 2])
        deg2 = 2
        for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
            poly = PolynomialFeatures(deg, include_bias=True)
            P_test = poly.fit_transform(X)
            self.assertEqual(P_test, P)
            names = poly.get_feature_names()
            ext = ExtendedFeatures(poly_degree=deg)
            e_test = ext.fit_transform(X)
            e_names = ext.get_feature_names()
            self.assertEqual(len(names), len(e_names))
            self.assertEqual(names, e_names)
            self.assertEqual(P_test, P)
            self.assertEqual(P_test.shape, e_test.shape)
            self.assertEqual(P_test, e_test)
    def test_polynomial_features_slow(self):
        """Same as test_polynomial_features but for kind='poly-slow'."""
        X1 = numpy.arange(6)[:, numpy.newaxis]
        P1 = numpy.hstack([numpy.ones_like(X1),
                           X1, X1 ** 2, X1 ** 3])
        deg1 = 3
        X2 = numpy.arange(6).reshape((3, 2))
        x1 = X2[:, :1]
        x2 = X2[:, 1:]
        P2 = numpy.hstack([x1 ** 0 * x2 ** 0,
                           x1 ** 1 * x2 ** 0,
                           x1 ** 0 * x2 ** 1,
                           x1 ** 2 * x2 ** 0,
                           x1 ** 1 * x2 ** 1,
                           x1 ** 0 * x2 ** 2])
        deg2 = 2
        for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
            poly = PolynomialFeatures(deg, include_bias=True)
            P_test = poly.fit_transform(X)
            self.assertEqual(P_test, P)
            names = poly.get_feature_names()
            ext = ExtendedFeatures(kind='poly-slow', poly_degree=deg)
            e_test = ext.fit_transform(X)
            e_names = ext.get_feature_names()
            self.assertEqual(len(names), len(e_names))
            self.assertEqual(names, e_names)
            self.assertEqual(P_test, P)
            self.assertEqual(P_test.shape, e_test.shape)
            self.assertEqual(P_test, e_test)
    def test_polynomial_features_nobias_ionly(self):
        """No bias column, interaction terms only (no pure powers)."""
        X1 = numpy.arange(6)[:, numpy.newaxis]
        P1 = numpy.hstack([numpy.ones_like(X1),
                           X1, X1 ** 2, X1 ** 3])
        deg1 = 3
        X2 = numpy.arange(6).reshape((3, 2))
        x1 = X2[:, :1]
        x2 = X2[:, 1:]
        P2 = numpy.hstack([x1 ** 0 * x2 ** 0,
                           x1 ** 1 * x2 ** 0,
                           x1 ** 0 * x2 ** 1,
                           x1 ** 2 * x2 ** 0,
                           x1 ** 1 * x2 ** 1,
                           x1 ** 0 * x2 ** 2])
        deg2 = 2
        for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
            # fc selects, from the full expansion P, the columns that
            # interaction_only + include_bias=False is expected to keep.
            fc = [1] if deg == 3 else [1, 2, 4]
            poly = PolynomialFeatures(deg, include_bias=False,
                                      interaction_only=True)
            P_test = poly.fit_transform(X)
            names = poly.get_feature_names()
            self.assertEqual(P_test, P[:, fc])
            ext = ExtendedFeatures(poly_degree=deg,
                                   poly_include_bias=False,
                                   poly_interaction_only=True)
            e_test = ext.fit_transform(X)
            e_names = ext.get_feature_names()
            self.assertEqual(len(names), len(e_names))
            self.assertEqual(names, e_names)
            self.assertEqual(P_test, P[:, fc])
            self.assertEqual(P_test.shape, e_test.shape)
            self.assertEqual(P_test, e_test)
    def test_polynomial_features_nobias_ionly_slow(self):
        """Same as the nobias_ionly case but for kind='poly-slow'."""
        X1 = numpy.arange(6)[:, numpy.newaxis]
        P1 = numpy.hstack([numpy.ones_like(X1),
                           X1, X1 ** 2, X1 ** 3])
        deg1 = 3
        X2 = numpy.arange(6).reshape((3, 2))
        x1 = X2[:, :1]
        x2 = X2[:, 1:]
        P2 = numpy.hstack([x1 ** 0 * x2 ** 0,
                           x1 ** 1 * x2 ** 0,
                           x1 ** 0 * x2 ** 1,
                           x1 ** 2 * x2 ** 0,
                           x1 ** 1 * x2 ** 1,
                           x1 ** 0 * x2 ** 2])
        deg2 = 2
        for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
            fc = [1] if deg == 3 else [1, 2, 4]
            poly = PolynomialFeatures(deg, include_bias=False,
                                      interaction_only=True)
            P_test = poly.fit_transform(X)
            names = poly.get_feature_names()
            self.assertEqual(P_test, P[:, fc])
            ext = ExtendedFeatures(kind="poly-slow", poly_degree=deg,
                                   poly_include_bias=False,
                                   poly_interaction_only=True)
            e_test = ext.fit_transform(X)
            e_names = ext.get_feature_names()
            self.assertEqual(len(names), len(e_names))
            self.assertEqual(names, e_names)
            self.assertEqual(P_test, P[:, fc])
            self.assertEqual(P_test.shape, e_test.shape)
            self.assertEqual(P_test, e_test)
    def test_polynomial_features_bias_ionly(self):
        """Bias column kept, interaction terms only."""
        X1 = numpy.arange(6)[:, numpy.newaxis]
        P1 = numpy.hstack([numpy.ones_like(X1),
                           X1, X1 ** 2, X1 ** 3])
        deg1 = 3
        X2 = numpy.arange(6).reshape((3, 2))
        x1 = X2[:, :1]
        x2 = X2[:, 1:]
        P2 = numpy.hstack([x1 ** 0 * x2 ** 0,
                           x1 ** 1 * x2 ** 0,
                           x1 ** 0 * x2 ** 1,
                           x1 ** 2 * x2 ** 0,
                           x1 ** 1 * x2 ** 1,
                           x1 ** 0 * x2 ** 2])
        deg2 = 2
        for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
            # Column 0 (the bias) is kept in addition to the interaction terms.
            fc = [0, 1] if deg == 3 else [0, 1, 2, 4]
            poly = PolynomialFeatures(deg, include_bias=True,
                                      interaction_only=True)
            P_test = poly.fit_transform(X)
            names = poly.get_feature_names()
            self.assertEqual(P_test, P[:, fc])
            ext = ExtendedFeatures(poly_degree=deg,
                                   poly_include_bias=True,
                                   poly_interaction_only=True)
            e_test = ext.fit_transform(X)
            e_names = ext.get_feature_names()
            self.assertEqual(len(names), len(e_names))
            self.assertEqual(names, e_names)
            self.assertEqual(P_test, P[:, fc])
            self.assertEqual(P_test.shape, e_test.shape)
            self.assertEqual(P_test, e_test)
    def test_polynomial_features_bias_ionly_slow(self):
        """Same as the bias_ionly case but for kind='poly-slow'."""
        X1 = numpy.arange(6)[:, numpy.newaxis]
        P1 = numpy.hstack([numpy.ones_like(X1),
                           X1, X1 ** 2, X1 ** 3])
        deg1 = 3
        X2 = numpy.arange(6).reshape((3, 2))
        x1 = X2[:, :1]
        x2 = X2[:, 1:]
        P2 = numpy.hstack([x1 ** 0 * x2 ** 0,
                           x1 ** 1 * x2 ** 0,
                           x1 ** 0 * x2 ** 1,
                           x1 ** 2 * x2 ** 0,
                           x1 ** 1 * x2 ** 1,
                           x1 ** 0 * x2 ** 2])
        deg2 = 2
        for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
            fc = [0, 1] if deg == 3 else [0, 1, 2, 4]
            poly = PolynomialFeatures(deg, include_bias=True,
                                      interaction_only=True)
            P_test = poly.fit_transform(X)
            names = poly.get_feature_names()
            self.assertEqual(P_test, P[:, fc])
            ext = ExtendedFeatures(kind="poly-slow", poly_degree=deg,
                                   poly_include_bias=True,
                                   poly_interaction_only=True)
            e_test = ext.fit_transform(X)
            e_names = ext.get_feature_names()
            self.assertEqual(len(names), len(e_names))
            self.assertEqual(names, e_names)
            self.assertEqual(P_test, P[:, fc])
            self.assertEqual(P_test.shape, e_test.shape)
            self.assertEqual(P_test, e_test)
    def test_polynomial_features_nobias(self):
        """Full polynomial expansion without the bias column (P[:, 1:])."""
        X1 = numpy.arange(6)[:, numpy.newaxis]
        P1 = numpy.hstack([numpy.ones_like(X1),
                           X1, X1 ** 2, X1 ** 3])
        deg1 = 3
        X2 = numpy.arange(6).reshape((3, 2))
        x1 = X2[:, :1]
        x2 = X2[:, 1:]
        P2 = numpy.hstack([x1 ** 0 * x2 ** 0,
                           x1 ** 1 * x2 ** 0,
                           x1 ** 0 * x2 ** 1,
                           x1 ** 2 * x2 ** 0,
                           x1 ** 1 * x2 ** 1,
                           x1 ** 0 * x2 ** 2])
        deg2 = 2
        for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
            poly = PolynomialFeatures(deg, include_bias=False)
            P_test = poly.fit_transform(X)
            self.assertEqual(P_test, P[:, 1:])
            names = poly.get_feature_names()
            ext = ExtendedFeatures(poly_degree=deg, poly_include_bias=False)
            e_test = ext.fit_transform(X)
            self.assertEqual(P_test, P[:, 1:])
            e_names = ext.get_feature_names()
            self.assertEqual(len(names), len(e_names))
            self.assertEqual(names, e_names)
            self.assertEqual(P_test.shape, e_test.shape)
            self.assertEqual(P_test, e_test)
    def test_polynomial_features_bigger(self):
        """Agreement with scikit-learn on a wider matrix for degrees 1..4."""
        X = numpy.arange(30).reshape((5, 6))
        for deg in (1, 2, 3, 4):
            poly = PolynomialFeatures(deg, include_bias=True)
            X_sk = poly.fit_transform(X)
            names_sk = poly.get_feature_names()
            ext = ExtendedFeatures(poly_degree=deg)
            X_ext = ext.fit_transform(X)
            # Names must match both with explicit input names and defaults.
            inames = ["x%d" % i for i in range(0, X.shape[1])]
            names_ext = ext.get_feature_names(inames)
            self.assertEqual(len(names_sk), len(names_ext))
            self.assertEqual(names_sk, names_ext)
            names_ext = ext.get_feature_names()
            self.assertEqual(len(names_sk), len(names_ext))
            self.assertEqual(names_sk, names_ext)
            self.assertEqual(X_sk.shape, X_ext.shape)
            self.assertEqual(X_sk, X_ext)
    def test_polynomial_features_bigger_ionly(self):
        """Agreement with scikit-learn (interaction only) for degrees 1..5."""
        X = numpy.arange(30).reshape((5, 6))
        for deg in (1, 2, 3, 4, 5):
            poly = PolynomialFeatures(deg, include_bias=True,
                                      interaction_only=True)
            X_sk = poly.fit_transform(X)
            names_sk = poly.get_feature_names()
            ext = ExtendedFeatures(poly_degree=deg, poly_include_bias=True,
                                   poly_interaction_only=True)
            X_ext = ext.fit_transform(X)
            inames = ["x%d" % i for i in range(0, X.shape[1])]
            names_ext = ext.get_feature_names(inames)
            self.assertEqual(len(names_sk), len(names_ext))
            self.assertEqual(names_sk, names_ext)
            names_ext = ext.get_feature_names()
            self.assertEqual(len(names_sk), len(names_ext))
            self.assertEqual(names_sk, names_ext)
            self.assertEqual(X_sk.shape, X_ext.shape)
            self.assertEqual(X_sk, X_ext)
    @unittest.skip(reason="sparse not implemented for polynomial features")
    def test_polynomial_features_sparse(self):
        """Sparse-input parity with scikit-learn (skipped: not implemented)."""
        dtype = numpy.float64
        rng = numpy.random.RandomState(0)  # pylint: disable=E1101
        X = rng.randint(0, 2, (100, 2))
        X_sparse = sparse.csr_matrix(X)
        est = PolynomialFeatures(2)
        Xt_sparse = est.fit_transform(X_sparse.astype(dtype))
        Xt_dense = est.fit_transform(X.astype(dtype))
        self.assertIsInstance(
            Xt_sparse, (sparse.csc_matrix, sparse.csr_matrix))
        self.assertEqual(Xt_sparse.dtype, Xt_dense.dtype)
        self.assertEqual(Xt_sparse.A, Xt_dense)
        est = ExtendedFeatures(poly_degree=2)
        Xt_sparse = est.fit_transform(X_sparse.astype(dtype))
        Xt_dense = est.fit_transform(X.astype(dtype))
        self.assertIsInstance(Xt_sparse, sparse.csc_matrix)
        self.assertEqual(Xt_sparse.dtype, Xt_dense.dtype)
        self.assertEqual(Xt_sparse.A, Xt_dense)
    def polynomial_features_csr_X_zero_row(self, zero_row_index, deg, interaction_only):
        """Helper (not a test itself): checks parity on a dense matrix whose
        row ``zero_row_index`` is all zeros — a regression case exercised by
        :meth:`test_polynomial_features_bug`."""
        X_csr = sparse_random(3, 10, 1.0, random_state=0).tocsr()
        X_csr[zero_row_index, :] = 0.0
        X = X_csr.toarray()
        est = ExtendedFeatures(poly_degree=deg, poly_include_bias=False,
                               poly_interaction_only=interaction_only)
        est.fit(X)
        poly = PolynomialFeatures(degree=deg, include_bias=False,
                                  interaction_only=interaction_only)
        poly.fit(X)
        self.assertEqual(poly.get_feature_names(), est.get_feature_names())
        Xt_dense1 = est.fit_transform(X)
        Xt_dense2 = poly.fit_transform(X)
        self.assertEqual(Xt_dense1, Xt_dense2)
    def test_polynomial_features_bug(self):
        """Regression test: zero rows at every position x degree x option combo."""
        for p in [(0, 3, True), (0, 2, True), (1, 2, True),
                  (2, 2, True), (1, 3, True), (2, 3, True),
                  (0, 2, False), (1, 2, False), (2, 2, False),
                  (0, 3, False), (1, 3, False), (2, 3, False)]:
            self.polynomial_features_csr_X_zero_row(*list(p))
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 38.309333 | 88 | 0.501601 |
800d469deab6be0fb7c1a642d52f9a8f39dc9753 | 11,629 | py | Python | google/cloud/sql/v1/sql-v1-py/google/cloud/sql_v1/services/sql_tiers_service/transports/grpc.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/sql/v1/sql-v1-py/google/cloud/sql_v1/services/sql_tiers_service/transports/grpc.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/sql/v1/sql-v1-py/google/cloud/sql_v1/services/sql_tiers_service/transports/grpc.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.sql_v1.types import cloud_sql_tiers
from .base import SqlTiersServiceTransport, DEFAULT_CLIENT_INFO
class SqlTiersServiceGrpcTransport(SqlTiersServiceTransport):
    """gRPC backend transport for SqlTiersService.
    Service for providing machine types (tiers) for Cloud SQL
    instances.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of gRPC stub callables created lazily, keyed by method name.
    _stubs: Dict[str, Callable]
    def __init__(self, *,
            host: str = 'sqladmin.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            channel: grpc.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                # Non-deprecated mTLS path: only build SSL credentials when the
                # caller did not supply ssl_channel_credentials explicitly.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(cls,
                       host: str = 'sqladmin.googleapis.com',
                       credentials: ga_credentials.Credentials = None,
                       credentials_file: str = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def list(self) -> Callable[
            [cloud_sql_tiers.SqlTiersListRequest],
            cloud_sql_tiers.TiersListResponse]:
        r"""Return a callable for the list method over gRPC.
        Lists all available machine types (tiers) for Cloud
        SQL, for example, db-custom-1-3840. For more
        information, see https://cloud.google.com/sql/pricing.
        Returns:
            Callable[[~.SqlTiersListRequest],
                    ~.TiersListResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list' not in self._stubs:
            self._stubs['list'] = self.grpc_channel.unary_unary(
                '/google.cloud.sql.v1.SqlTiersService/List',
                request_serializer=cloud_sql_tiers.SqlTiersListRequest.serialize,
                response_deserializer=cloud_sql_tiers.TiersListResponse.deserialize,
            )
        return self._stubs['list']
    def close(self):
        """Close the underlying gRPC channel, releasing its resources."""
        self.grpc_channel.close()
# Public API of this module.
__all__ = (
    'SqlTiersServiceGrpcTransport',
)
| 45.073643 | 87 | 0.625247 |
b63231845bbd8971cac19663d6a67a48b82f4060 | 1,512 | py | Python | scripts/tests/test_migrate_dates.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | scripts/tests/test_migrate_dates.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | scripts/tests/test_migrate_dates.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from nose.tools import * # noqa
from scripts.osfstorage.utils import ensure_osf_files
from website import settings
ensure_osf_files(settings)
# Hack: Must configure add-ons before importing `OsfTestCase`
from website.addons.osfstorage.tests.factories import FileVersionFactory
from website.addons.osfstorage.model import OsfStorageFileRecord
from website.addons.osffiles.model import NodeFile
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from scripts.osfstorage.migrate_dates import main
class TestMigrateDates(OsfTestCase):
    """Checks that scripts.osfstorage.migrate_dates copies the legacy
    osffiles ``NodeFile.date_modified`` onto the corresponding osfstorage
    ``FileVersion``."""
    def setUp(self):
        """Build a project with one legacy NodeFile and one osfstorage
        FileVersion whose dates deliberately disagree."""
        super(TestMigrateDates, self).setUp()
        self.path = 'old-pizza'
        self.project = ProjectFactory()
        self.node_settings = self.project.get_addon('osfstorage')
        self.node_file = NodeFile(path=self.path)
        self.node_file.save()
        self.node_file.reload()
        # Legacy date the migration is expected to carry over.
        self.date = self.node_file.date_modified
        # NOTE(review): key is 'old_pizza' while the path is 'old-pizza' —
        # presumably the files_versions mapping uses a sanitized key; confirm
        # against the osffiles model before relying on this.
        self.project.files_versions['old_pizza'] = [self.node_file._id]
        self.project.save()
        self.version = FileVersionFactory(date_modified=datetime.datetime.now())
        self.record, _ = OsfStorageFileRecord.get_or_create(self.node_file.path, self.node_settings)
        self.record.versions = [self.version]
        self.record.save()
    def test_migrate_dates(self):
        """Run the migration for real (dry_run=False) and verify the version's
        date_created now equals the legacy NodeFile date."""
        assert_not_equal(self.version.date_modified, self.date)
        main(dry_run=False)
        assert_equal(self.version.date_created, self.date)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.